FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_periph.c
1 /*-
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/8.2/sys/cam/cam_periph.c 211993 2010-08-30 10:30:35Z attilio $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/linker_set.h>
39 #include <sys/bio.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/buf.h>
43 #include <sys/proc.h>
44 #include <sys/devicestat.h>
45 #include <sys/bus.h>
46 #include <vm/vm.h>
47 #include <vm/vm_extern.h>
48
49 #include <cam/cam.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_xpt_periph.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_debug.h>
55 #include <cam/cam_sim.h>
56
57 #include <cam/scsi/scsi_all.h>
58 #include <cam/scsi/scsi_message.h>
59 #include <cam/scsi/scsi_pass.h>
60
61 static u_int camperiphnextunit(struct periph_driver *p_drv,
62 u_int newunit, int wired,
63 path_id_t pathid, target_id_t target,
64 lun_id_t lun);
65 static u_int camperiphunit(struct periph_driver *p_drv,
66 path_id_t pathid, target_id_t target,
67 lun_id_t lun);
68 static void camperiphdone(struct cam_periph *periph,
69 union ccb *done_ccb);
70 static void camperiphfree(struct cam_periph *periph);
71 static int camperiphscsistatuserror(union ccb *ccb,
72 cam_flags camflags,
73 u_int32_t sense_flags,
74 int *openings,
75 u_int32_t *relsim_flags,
76 u_int32_t *timeout,
77 const char **action_string);
78 static int camperiphscsisenseerror(union ccb *ccb,
79 cam_flags camflags,
80 u_int32_t sense_flags,
81 int *openings,
82 u_int32_t *relsim_flags,
83 u_int32_t *timeout,
84 const char **action_string);
85
86 static int nperiph_drivers;
87 static int initialized = 0;
88 struct periph_driver **periph_drivers;
89
90 MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
91
92 static int periph_selto_delay = 1000;
93 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
94 static int periph_noresrc_delay = 500;
95 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
96 static int periph_busy_delay = 500;
97 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
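/*
 * These delays are boot-time tunables, taken in milliseconds (they are
 * applied as device-queue release timeouts below).  A minimal
 * loader.conf(5) example overriding the selection-timeout delay:
 *
 *	kern.cam.periph_selto_delay="2000"
 */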
98
99
100 void
101 periphdriver_register(void *data)
102 {
103 struct periph_driver *drv = (struct periph_driver *)data;
104 struct periph_driver **newdrivers, **old;
105 int ndrivers;
106
107 ndrivers = nperiph_drivers + 2;
108 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
109 M_WAITOK);
110 if (periph_drivers)
111 bcopy(periph_drivers, newdrivers,
112 sizeof(*newdrivers) * nperiph_drivers);
113 newdrivers[nperiph_drivers] = drv;
114 newdrivers[nperiph_drivers + 1] = NULL;
115 old = periph_drivers;
116 periph_drivers = newdrivers;
117 if (old)
118 free(old, M_CAMPERIPH);
119 nperiph_drivers++;
120         /* If the driver is marked as early, or it is late now, initialize it. */
121 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
122 initialized > 1)
123 (*drv->init)();
124 }
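/*
 * A sketch of how a peripheral driver normally reaches
 * periphdriver_register(): it declares a struct periph_driver and
 * registers it with the PERIPHDRIVER_DECLARE() macro from
 * cam_periph.h.  The "xx" driver below is hypothetical.
 */
static void
xxinit(void)
{
        /* Typically registers an async callback for AC_FOUND_DEVICE. */
}

static struct periph_driver xxdriver =
{
        xxinit, "xx",
        TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);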
125
126 void
127 periphdriver_init(int level)
128 {
129 int i, early;
130
131 initialized = max(initialized, level);
132 for (i = 0; periph_drivers[i] != NULL; i++) {
133 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
134 if (early == initialized)
135 (*periph_drivers[i]->init)();
136 }
137 }
138
139 cam_status
140 cam_periph_alloc(periph_ctor_t *periph_ctor,
141 periph_oninv_t *periph_oninvalidate,
142 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
143 char *name, cam_periph_type type, struct cam_path *path,
144 ac_callback_t *ac_callback, ac_code code, void *arg)
145 {
146 struct periph_driver **p_drv;
147 struct cam_sim *sim;
148 struct cam_periph *periph;
149 struct cam_periph *cur_periph;
150 path_id_t path_id;
151 target_id_t target_id;
152 lun_id_t lun_id;
153 cam_status status;
154 u_int init_level;
155
156 init_level = 0;
157 /*
158 * Handle Hot-Plug scenarios. If there is already a peripheral
159 * of our type assigned to this path, we are likely waiting for
160 * final close on an old, invalidated, peripheral. If this is
161 * the case, queue up a deferred call to the peripheral's async
162 * handler. If it looks like a mistaken re-allocation, complain.
163 */
164 if ((periph = cam_periph_find(path, name)) != NULL) {
165
166 if ((periph->flags & CAM_PERIPH_INVALID) != 0
167 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
168 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
169 periph->deferred_callback = ac_callback;
170 periph->deferred_ac = code;
171 return (CAM_REQ_INPROG);
172 } else {
173 printf("cam_periph_alloc: attempt to re-allocate "
174 "valid device %s%d rejected\n",
175 periph->periph_name, periph->unit_number);
176 }
177 return (CAM_REQ_INVALID);
178 }
179
180 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
181 M_NOWAIT);
182
183 if (periph == NULL)
184 return (CAM_RESRC_UNAVAIL);
185
186 init_level++;
187
188
189 sim = xpt_path_sim(path);
190 path_id = xpt_path_path_id(path);
191 target_id = xpt_path_target_id(path);
192 lun_id = xpt_path_lun_id(path);
193 bzero(periph, sizeof(*periph));
194 cam_init_pinfo(&periph->pinfo);
195 periph->periph_start = periph_start;
196 periph->periph_dtor = periph_dtor;
197 periph->periph_oninval = periph_oninvalidate;
198 periph->type = type;
199 periph->periph_name = name;
200 periph->immediate_priority = CAM_PRIORITY_NONE;
201 periph->refcount = 0;
202 periph->sim = sim;
203 SLIST_INIT(&periph->ccb_list);
204 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
205 if (status != CAM_REQ_CMP)
206 goto failure;
207 periph->path = path;
208
209 xpt_lock_buses();
210 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
211 if (strcmp((*p_drv)->driver_name, name) == 0)
212 break;
213 }
214 if (*p_drv == NULL) {
215 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
216 xpt_free_path(periph->path);
217 free(periph, M_CAMPERIPH);
218 xpt_unlock_buses();
219 return (CAM_REQ_INVALID);
220 }
221 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
222 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
223 while (cur_periph != NULL
224 && cur_periph->unit_number < periph->unit_number)
225 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
226 if (cur_periph != NULL) {
227 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
228 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
229 } else {
230 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
231 (*p_drv)->generation++;
232 }
233 xpt_unlock_buses();
234
235 init_level++;
236
237 status = xpt_add_periph(periph);
238 if (status != CAM_REQ_CMP)
239 goto failure;
240
241 init_level++;
242
243 status = periph_ctor(periph, arg);
244
245 if (status == CAM_REQ_CMP)
246 init_level++;
247
248 failure:
249 switch (init_level) {
250 case 4:
251 /* Initialized successfully */
252 break;
253 case 3:
254 xpt_remove_periph(periph);
255 /* FALLTHROUGH */
256 case 2:
257 xpt_lock_buses();
258 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
259 xpt_unlock_buses();
260 xpt_free_path(periph->path);
261 /* FALLTHROUGH */
262 case 1:
263 free(periph, M_CAMPERIPH);
264 /* FALLTHROUGH */
265 case 0:
266 /* No cleanup to perform. */
267 break;
268 default:
269 panic("cam_periph_alloc: Unkown init level");
270 }
271 return(status);
272 }
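/*
 * A sketch of a typical cam_periph_alloc() call site, modeled on how
 * the disk-class drivers attach from their async callback when an
 * AC_FOUND_DEVICE event is delivered; all "xx" names are hypothetical
 * driver routines (xxasync is sketched after cam_periph_async() below).
 */
static periph_ctor_t    xxregister;
static periph_oninv_t   xxoninvalidate;
static periph_dtor_t    xxcleanup;
static periph_start_t   xxstart;
static void             xxasync(void *, u_int32_t, struct cam_path *, void *);

static void
xx_found_device(void *callback_arg, struct ccb_getdev *cgd)
{
        cam_status status;

        status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
            xxstart, "xx", CAM_PERIPH_BIO, cgd->ccb_h.path,
            /*ac_callback*/xxasync, AC_FOUND_DEVICE, cgd);
        if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
                printf("xx_found_device: attach failed, status 0x%x\n",
                    status);
}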
273
274 /*
275 * Find a peripheral structure with the specified path, target, lun,
276 * and (optionally) type. If the name is NULL, this function will return
277 * the first peripheral driver that matches the specified path.
278 */
279 struct cam_periph *
280 cam_periph_find(struct cam_path *path, char *name)
281 {
282 struct periph_driver **p_drv;
283 struct cam_periph *periph;
284
285 xpt_lock_buses();
286 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
287
288 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
289 continue;
290
291 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
292 if (xpt_path_comp(periph->path, path) == 0) {
293 xpt_unlock_buses();
294 mtx_assert(periph->sim->mtx, MA_OWNED);
295 return(periph);
296 }
297 }
298 if (name != NULL) {
299 xpt_unlock_buses();
300 return(NULL);
301 }
302 }
303 xpt_unlock_buses();
304 return(NULL);
305 }
306
307 cam_status
308 cam_periph_acquire(struct cam_periph *periph)
309 {
310
311 if (periph == NULL)
312 return(CAM_REQ_CMP_ERR);
313
314 xpt_lock_buses();
315 periph->refcount++;
316 xpt_unlock_buses();
317
318 return(CAM_REQ_CMP);
319 }
320
321 void
322 cam_periph_release_locked(struct cam_periph *periph)
323 {
324
325 if (periph == NULL)
326 return;
327
328 xpt_lock_buses();
329 if (periph->refcount != 0) {
330 periph->refcount--;
331 } else {
332 xpt_print(periph->path, "%s: release %p when refcount is zero\n ", __func__, periph);
333 }
334 if (periph->refcount == 0
335 && (periph->flags & CAM_PERIPH_INVALID)) {
336 camperiphfree(periph);
337 }
338 xpt_unlock_buses();
339 }
340
341 void
342 cam_periph_release(struct cam_periph *periph)
343 {
344 struct cam_sim *sim;
345
346 if (periph == NULL)
347 return;
348
349 sim = periph->sim;
350 mtx_assert(sim->mtx, MA_NOTOWNED);
351 mtx_lock(sim->mtx);
352 cam_periph_release_locked(periph);
353 mtx_unlock(sim->mtx);
354 }
355
356 int
357 cam_periph_hold(struct cam_periph *periph, int priority)
358 {
359 int error;
360
361 /*
362 * Increment the reference count on the peripheral
363 * while we wait for our lock attempt to succeed
364 * to ensure the peripheral doesn't disappear out
365          * from under us while we sleep.
366 */
367
368 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
369 return (ENXIO);
370
371 mtx_assert(periph->sim->mtx, MA_OWNED);
372 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
373 periph->flags |= CAM_PERIPH_LOCK_WANTED;
374 if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
375 "caplck", 0)) != 0) {
376 cam_periph_release_locked(periph);
377 return (error);
378 }
379 }
380
381 periph->flags |= CAM_PERIPH_LOCKED;
382 return (0);
383 }
384
385 void
386 cam_periph_unhold(struct cam_periph *periph)
387 {
388
389 mtx_assert(periph->sim->mtx, MA_OWNED);
390
391 periph->flags &= ~CAM_PERIPH_LOCKED;
392 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
393 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
394 wakeup(periph);
395 }
396
397 cam_periph_release_locked(periph);
398 }
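/*
 * A sketch of the usual acquire/hold pairing in a driver's open path
 * ("xx" hypothetical): the acquire reference keeps the periph alive
 * until close, while the hold serializes open against other holders.
 * Note that cam_periph_hold() takes its own reference, which
 * cam_periph_unhold() drops.
 */
static int
xx_open(struct cam_periph *periph)
{
        int error;

        if (cam_periph_acquire(periph) != CAM_REQ_CMP)
                return (ENXIO);

        cam_periph_lock(periph);
        if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
                cam_periph_unlock(periph);
                cam_periph_release(periph);
                return (error);
        }

        /* ... driver-specific open work ... */

        cam_periph_unhold(periph);
        cam_periph_unlock(periph);
        return (0);     /* the acquire reference is dropped at close */
}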
399
400 /*
401 * Look for the next unit number that is not currently in use for this
402 * peripheral type starting at "newunit". Also exclude unit numbers that
403  * are reserved for future "hardwiring" unless we already know that this
404 * is a potential wired device. Only assume that the device is "wired" the
405 * first time through the loop since after that we'll be looking at unit
406 * numbers that did not match a wiring entry.
407 */
408 static u_int
409 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
410 path_id_t pathid, target_id_t target, lun_id_t lun)
411 {
412 struct cam_periph *periph;
413 char *periph_name;
414 int i, val, dunit, r;
415 const char *dname, *strval;
416
417 periph_name = p_drv->driver_name;
418 for (;;newunit++) {
419
420 for (periph = TAILQ_FIRST(&p_drv->units);
421 periph != NULL && periph->unit_number != newunit;
422 periph = TAILQ_NEXT(periph, unit_links))
423 ;
424
425 if (periph != NULL && periph->unit_number == newunit) {
426 if (wired != 0) {
427 xpt_print(periph->path, "Duplicate Wired "
428 "Device entry!\n");
429 xpt_print(periph->path, "Second device (%s "
430 "device at scbus%d target %d lun %d) will "
431 "not be wired\n", periph_name, pathid,
432 target, lun);
433 wired = 0;
434 }
435 continue;
436 }
437 if (wired)
438 break;
439
440 /*
441 * Don't match entries like "da 4" as a wired down
442 * device, but do match entries like "da 4 target 5"
443 * or even "da 4 scbus 1".
444 */
445 i = 0;
446 dname = periph_name;
447 for (;;) {
448 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
449 if (r != 0)
450 break;
451 /* if no "target" and no specific scbus, skip */
452 if (resource_int_value(dname, dunit, "target", &val) &&
453 (resource_string_value(dname, dunit, "at",&strval)||
454 strcmp(strval, "scbus") == 0))
455 continue;
456 if (newunit == dunit)
457 break;
458 }
459 if (r != 0)
460 break;
461 }
462 return (newunit);
463 }
464
465 static u_int
466 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
467 target_id_t target, lun_id_t lun)
468 {
469 u_int unit;
470 int wired, i, val, dunit;
471 const char *dname, *strval;
472 char pathbuf[32], *periph_name;
473
474 periph_name = p_drv->driver_name;
475 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
476 unit = 0;
477 i = 0;
478 dname = periph_name;
479 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
480 wired = 0) {
481 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
482 if (strcmp(strval, pathbuf) != 0)
483 continue;
484 wired++;
485 }
486 if (resource_int_value(dname, dunit, "target", &val) == 0) {
487 if (val != target)
488 continue;
489 wired++;
490 }
491 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
492 if (val != lun)
493 continue;
494 wired++;
495 }
496 if (wired != 0) {
497 unit = dunit;
498 break;
499 }
500 }
501
502 /*
503 * Either start from 0 looking for the next unit or from
504 * the unit number given in the resource config. This way,
505 * if we have wildcard matches, we don't return the same
506 * unit number twice.
507 */
508 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
509
510 return (unit);
511 }
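/*
 * The wiring entries consulted above come from the device.hints(5) /
 * kernel environment resources named "at", "target" and "lun".  An
 * example that wires unit da4 to scbus1, target 5, lun 0:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */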
512
513 void
514 cam_periph_invalidate(struct cam_periph *periph)
515 {
516
517 /*
518 * We only call this routine the first time a peripheral is
519 * invalidated.
520 */
521 if (((periph->flags & CAM_PERIPH_INVALID) == 0)
522 && (periph->periph_oninval != NULL))
523 periph->periph_oninval(periph);
524
525 periph->flags |= CAM_PERIPH_INVALID;
526 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
527
528 xpt_lock_buses();
529 if (periph->refcount == 0)
530 camperiphfree(periph);
531 else if (periph->refcount < 0)
532 printf("cam_invalidate_periph: refcount < 0!!\n");
533 xpt_unlock_buses();
534 }
535
536 static void
537 camperiphfree(struct cam_periph *periph)
538 {
539 struct periph_driver **p_drv;
540
541 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
542 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
543 break;
544 }
545 if (*p_drv == NULL) {
546 printf("camperiphfree: attempt to free non-existant periph\n");
547 return;
548 }
549
550 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
551 (*p_drv)->generation++;
552 xpt_unlock_buses();
553
554 if (periph->periph_dtor != NULL)
555 periph->periph_dtor(periph);
556 xpt_remove_periph(periph);
557
558 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
559 union ccb ccb;
560 void *arg;
561
562 switch (periph->deferred_ac) {
563 case AC_FOUND_DEVICE:
564 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
565 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
566 xpt_action(&ccb);
567 arg = &ccb;
568 break;
569 case AC_PATH_REGISTERED:
570 ccb.ccb_h.func_code = XPT_PATH_INQ;
571 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
572 xpt_action(&ccb);
573 arg = &ccb;
574 break;
575 default:
576 arg = NULL;
577 break;
578 }
579 periph->deferred_callback(NULL, periph->deferred_ac,
580 periph->path, arg);
581 }
582 xpt_free_path(periph->path);
583 free(periph, M_CAMPERIPH);
584 xpt_lock_buses();
585 }
586
587 /*
588 * Map user virtual pointers into kernel virtual address space, so we can
589 * access the memory. This won't work on physical pointers, for now it's
590 * up to the caller to check for that. (XXX KDM -- should we do that here
591 * instead?) This also only works for up to MAXPHYS memory. Since we use
592 * buffers to map stuff in and out, we're limited to the buffer size.
593 */
594 int
595 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
596 {
597 int numbufs, i, j;
598 int flags[CAM_PERIPH_MAXMAPS];
599 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
600 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
601 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
602 /* Some controllers may not be able to handle more data. */
603 size_t maxmap = DFLTPHYS;
604
605 switch(ccb->ccb_h.func_code) {
606 case XPT_DEV_MATCH:
607 if (ccb->cdm.match_buf_len == 0) {
608 printf("cam_periph_mapmem: invalid match buffer "
609 "length 0\n");
610 return(EINVAL);
611 }
612 if (ccb->cdm.pattern_buf_len > 0) {
613 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
614 lengths[0] = ccb->cdm.pattern_buf_len;
615 dirs[0] = CAM_DIR_OUT;
616 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
617 lengths[1] = ccb->cdm.match_buf_len;
618 dirs[1] = CAM_DIR_IN;
619 numbufs = 2;
620 } else {
621 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
622 lengths[0] = ccb->cdm.match_buf_len;
623 dirs[0] = CAM_DIR_IN;
624 numbufs = 1;
625 }
626 /*
627 * This request will not go to the hardware, no reason
628 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
629 */
630 maxmap = MAXPHYS;
631 break;
632 case XPT_SCSI_IO:
633 case XPT_CONT_TARGET_IO:
634 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
635 return(0);
636
637 data_ptrs[0] = &ccb->csio.data_ptr;
638 lengths[0] = ccb->csio.dxfer_len;
639 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
640 numbufs = 1;
641 break;
642 case XPT_ATA_IO:
643 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
644 return(0);
645
646 data_ptrs[0] = &ccb->ataio.data_ptr;
647 lengths[0] = ccb->ataio.dxfer_len;
648 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
649 numbufs = 1;
650 break;
651 default:
652 return(EINVAL);
653 break; /* NOTREACHED */
654 }
655
656 /*
657 * Check the transfer length and permissions first, so we don't
658 * have to unmap any previously mapped buffers.
659 */
660 for (i = 0; i < numbufs; i++) {
661
662 flags[i] = 0;
663
664 /*
665 * The userland data pointer passed in may not be page
666 * aligned. vmapbuf() truncates the address to a page
667 * boundary, so if the address isn't page aligned, we'll
668 * need enough space for the given transfer length, plus
669 * whatever extra space is necessary to make it to the page
670 * boundary.
671 */
672 if ((lengths[i] +
673 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
674 printf("cam_periph_mapmem: attempt to map %lu bytes, "
675 "which is greater than %lu\n",
676 (long)(lengths[i] +
677 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
678 (u_long)maxmap);
679 return(E2BIG);
680 }
681
682 if (dirs[i] & CAM_DIR_OUT) {
683 flags[i] = BIO_WRITE;
684 }
685
686 if (dirs[i] & CAM_DIR_IN) {
687 flags[i] = BIO_READ;
688 }
689
690 }
691
692 /* this keeps the current process from getting swapped */
693 /*
694 * XXX KDM should I use P_NOSWAP instead?
695 */
696 PHOLD(curproc);
697
698 for (i = 0; i < numbufs; i++) {
699 /*
700 * Get the buffer.
701 */
702 mapinfo->bp[i] = getpbuf(NULL);
703
704 /* save the buffer's data address */
705 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
706
707 /* put our pointer in the data slot */
708 mapinfo->bp[i]->b_data = *data_ptrs[i];
709
710 /* set the transfer length, we know it's < MAXPHYS */
711 mapinfo->bp[i]->b_bufsize = lengths[i];
712
713 /* set the direction */
714 mapinfo->bp[i]->b_iocmd = flags[i];
715
716 /*
717 * Map the buffer into kernel memory.
718 *
719 * Note that useracc() alone is not a sufficient test.
720 * vmapbuf() can still fail due to a smaller file mapped
721 * into a larger area of VM, or if userland races against
722 * vmapbuf() after the useracc() check.
723 */
724 if (vmapbuf(mapinfo->bp[i]) < 0) {
725 for (j = 0; j < i; ++j) {
726 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
727 vunmapbuf(mapinfo->bp[j]);
728 relpbuf(mapinfo->bp[j], NULL);
729 }
730 relpbuf(mapinfo->bp[i], NULL);
731 PRELE(curproc);
732 return(EACCES);
733 }
734
735 /* set our pointer to the new mapped area */
736 *data_ptrs[i] = mapinfo->bp[i]->b_data;
737
738 mapinfo->num_bufs_used++;
739 }
740
741 /*
742 * Now that we've gotten this far, change ownership to the kernel
743 * of the buffers so that we don't run afoul of returning to user
744 * space with locks (on the buffer) held.
745 */
746 for (i = 0; i < numbufs; i++) {
747 BUF_KERNPROC(mapinfo->bp[i]);
748 }
749
750
751 return(0);
752 }
753
754 /*
755 * Unmap memory segments mapped into kernel virtual address space by
756 * cam_periph_mapmem().
757 */
758 void
759 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
760 {
761 int numbufs, i;
762 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
763
764 if (mapinfo->num_bufs_used <= 0) {
765 /* allow ourselves to be swapped once again */
766 PRELE(curproc);
767 return;
768 }
769
770 switch (ccb->ccb_h.func_code) {
771 case XPT_DEV_MATCH:
772 numbufs = min(mapinfo->num_bufs_used, 2);
773
774 if (numbufs == 1) {
775 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
776 } else {
777 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
778 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
779 }
780 break;
781 case XPT_SCSI_IO:
782 case XPT_CONT_TARGET_IO:
783 data_ptrs[0] = &ccb->csio.data_ptr;
784 numbufs = min(mapinfo->num_bufs_used, 1);
785 break;
786 case XPT_ATA_IO:
787 data_ptrs[0] = &ccb->ataio.data_ptr;
788 numbufs = min(mapinfo->num_bufs_used, 1);
789 break;
790 default:
791 /* allow ourselves to be swapped once again */
792 PRELE(curproc);
793 return;
794 break; /* NOTREACHED */
795 }
796
797 for (i = 0; i < numbufs; i++) {
798 /* Set the user's pointer back to the original value */
799 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
800
801 /* unmap the buffer */
802 vunmapbuf(mapinfo->bp[i]);
803
804 /* release the buffer */
805 relpbuf(mapinfo->bp[i], NULL);
806 }
807
808 /* allow ourselves to be swapped once again */
809 PRELE(curproc);
810 }
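/*
 * A minimal sketch of the map/run/unmap pattern the two functions
 * above support, modeled on a pass(4)-style ioctl handler running a
 * user-supplied CCB (error handling trimmed, "xx" hypothetical).  The
 * CCB's cbfcnp is assumed to have been set to a wakeup routine such as
 * xxdone() in the sketch after cam_periph_runccb() below.
 */
static int
xx_run_user_ccb(struct cam_periph *periph, union ccb *ccb)
{
        struct cam_periph_map_info mapinfo;
        int error;

        bzero(&mapinfo, sizeof(mapinfo)); /* num_bufs_used must start at 0 */
        error = cam_periph_mapmem(ccb, &mapinfo);
        if (error != 0)
                return (error);

        error = cam_periph_runccb(ccb, /*error_routine*/NULL,
            /*camflags*/0, /*sense_flags*/SF_RETRY_UA, /*devstat*/NULL);

        cam_periph_unmapmem(ccb, &mapinfo);
        return (error);
}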
811
812 union ccb *
813 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
814 {
815 struct ccb_hdr *ccb_h;
816
817 mtx_assert(periph->sim->mtx, MA_OWNED);
818         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cam_periph_getccb\n"));
819
820 while (SLIST_FIRST(&periph->ccb_list) == NULL) {
821 if (periph->immediate_priority > priority)
822 periph->immediate_priority = priority;
823 xpt_schedule(periph, priority);
824 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
825 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
826 break;
827 mtx_assert(periph->sim->mtx, MA_OWNED);
828 mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
829 0);
830 }
831
832 ccb_h = SLIST_FIRST(&periph->ccb_list);
833 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
834 return ((union ccb *)ccb_h);
835 }
836
837 void
838 cam_periph_ccbwait(union ccb *ccb)
839 {
840 struct cam_sim *sim;
841
842 sim = xpt_path_sim(ccb->ccb_h.path);
843 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
844 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
845 mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
846 }
847
848 int
849 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
850 int (*error_routine)(union ccb *ccb,
851 cam_flags camflags,
852 u_int32_t sense_flags))
853 {
854 union ccb *ccb;
855 int error;
856 int found;
857
858 error = found = 0;
859
860 switch(cmd){
861 case CAMGETPASSTHRU:
862 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
863 xpt_setup_ccb(&ccb->ccb_h,
864 ccb->ccb_h.path,
865 CAM_PRIORITY_NORMAL);
866 ccb->ccb_h.func_code = XPT_GDEVLIST;
867
868 /*
869 * Basically, the point of this is that we go through
870 * getting the list of devices, until we find a passthrough
871 * device. In the current version of the CAM code, the
872 * only way to determine what type of device we're dealing
873 * with is by its name.
874 */
875 while (found == 0) {
876 ccb->cgdl.index = 0;
877 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
878 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
879
880 /* we want the next device in the list */
881 xpt_action(ccb);
882 if (strncmp(ccb->cgdl.periph_name,
883 "pass", 4) == 0){
884 found = 1;
885 break;
886 }
887 }
888 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
889 (found == 0)) {
890 ccb->cgdl.periph_name[0] = '\0';
891 ccb->cgdl.unit_number = 0;
892 break;
893 }
894 }
895
896 /* copy the result back out */
897 bcopy(ccb, addr, sizeof(union ccb));
898
899 /* and release the ccb */
900 xpt_release_ccb(ccb);
901
902 break;
903 default:
904 error = ENOTTY;
905 break;
906 }
907 return(error);
908 }
909
910 int
911 cam_periph_runccb(union ccb *ccb,
912 int (*error_routine)(union ccb *ccb,
913 cam_flags camflags,
914 u_int32_t sense_flags),
915 cam_flags camflags, u_int32_t sense_flags,
916 struct devstat *ds)
917 {
918 struct cam_sim *sim;
919 int error;
920
921 error = 0;
922 sim = xpt_path_sim(ccb->ccb_h.path);
923 mtx_assert(sim->mtx, MA_OWNED);
924
925 /*
926 * If the user has supplied a stats structure, and if we understand
927 * this particular type of ccb, record the transaction start.
928 */
929 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
930 ccb->ccb_h.func_code == XPT_ATA_IO))
931 devstat_start_transaction(ds, NULL);
932
933 xpt_action(ccb);
934
935 do {
936 cam_periph_ccbwait(ccb);
937 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
938 error = 0;
939 else if (error_routine != NULL)
940 error = (*error_routine)(ccb, camflags, sense_flags);
941 else
942 error = 0;
943
944 } while (error == ERESTART);
945
946 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
947 cam_release_devq(ccb->ccb_h.path,
948 /* relsim_flags */0,
949 /* openings */0,
950 /* timeout */0,
951 /* getcount_only */ FALSE);
952 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
953 }
954
955 if (ds != NULL) {
956 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
957 devstat_end_transaction(ds,
958 ccb->csio.dxfer_len,
959 ccb->csio.tag_action & 0x3,
960 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
961 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
962 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
963 DEVSTAT_WRITE :
964 DEVSTAT_READ, NULL, NULL);
965 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
966 devstat_end_transaction(ds,
967 ccb->ataio.dxfer_len,
968 ccb->ataio.tag_action & 0x3,
969 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
970 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
971 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
972 DEVSTAT_WRITE :
973 DEVSTAT_READ, NULL, NULL);
974 }
975 }
976
977 return(error);
978 }
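/*
 * A sketch of a complete synchronous command built from
 * cam_periph_getccb() and cam_periph_runccb() ("xx" names
 * hypothetical).  cam_periph_ccbwait() sleeps on &ccb_h.cbfcnp, so the
 * completion callback must issue the matching wakeup, as the
 * peripheral drivers of this era do from their "waiting CCB"
 * completion state.  xxerror() is a thin wrapper around
 * cam_periph_error(), sketched after that function below.
 */
static int      xxerror(union ccb *, cam_flags, u_int32_t);

static void
xxdone(struct cam_periph *periph, union ccb *done_ccb)
{
        wakeup(&done_ccb->ccb_h.cbfcnp);        /* caller releases the CCB */
}

static int
xx_test_unit_ready(struct cam_periph *periph)
{
        union ccb *ccb;
        int error;

        ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
        scsi_test_unit_ready(&ccb->csio,
                             /*retries*/1,
                             /*cbfcnp*/xxdone,
                             MSG_SIMPLE_Q_TAG,
                             SSD_FULL_SIZE,
                             /*timeout*/5000);
        error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
            SF_RETRY_UA, /*devstat*/NULL);
        xpt_release_ccb(ccb);
        return (error);
}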
979
980 void
981 cam_freeze_devq(struct cam_path *path)
982 {
983
984 cam_freeze_devq_arg(path, 0, 0);
985 }
986
987 void
988 cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
989 {
990 struct ccb_relsim crs;
991
992 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
993 crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
994 crs.release_flags = flags;
995 crs.openings = arg;
996 crs.release_timeout = arg;
997 xpt_action((union ccb *)&crs);
998 }
999
1000 u_int32_t
1001 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1002 u_int32_t openings, u_int32_t arg,
1003 int getcount_only)
1004 {
1005 struct ccb_relsim crs;
1006
1007 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1008 crs.ccb_h.func_code = XPT_REL_SIMQ;
1009 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1010 crs.release_flags = relsim_flags;
1011 crs.openings = openings;
1012 crs.release_timeout = arg;
1013 xpt_action((union ccb *)&crs);
1014 return (crs.qfrozen_cnt);
1015 }
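/*
 * A sketch of the freeze/release pairing these helpers provide: freeze
 * the device queue now and arm it to release itself after a delay, the
 * same pattern cam_periph_freeze_after_event() uses below for
 * bus-settle times.
 */
static void
xx_settle_devq(struct cam_path *path, u_int delay_ms)
{
        cam_freeze_devq(path);
        cam_release_devq(path, RELSIM_RELEASE_AFTER_TIMEOUT,
            /*openings*/0, /*timeout*/delay_ms, /*getcount_only*/0);
}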
1016
1017 #define saved_ccb_ptr ppriv_ptr0
1018 #define recovery_depth ppriv_field1
1019 static void
1020 camperiphsensedone(struct cam_periph *periph, union ccb *done_ccb)
1021 {
1022 union ccb *saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1023 cam_status status;
1024 int frozen = 0;
1025 u_int sense_key;
1026 int depth = done_ccb->ccb_h.recovery_depth;
1027
1028 status = done_ccb->ccb_h.status;
1029 if (status & CAM_DEV_QFRZN) {
1030 frozen = 1;
1031 /*
1032          * Clear the freeze flag now in case of retry;
1033          * the freeze will be dropped later.
1034 */
1035 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1036 }
1037 status &= CAM_STATUS_MASK;
1038 switch (status) {
1039 case CAM_REQ_CMP:
1040 {
1041 /*
1042 * If we manually retrieved sense into a CCB and got
1043 * something other than "NO SENSE" send the updated CCB
1044 * back to the client via xpt_done() to be processed via
1045 * the error recovery code again.
1046 */
1047 sense_key = saved_ccb->csio.sense_data.flags;
1048 sense_key &= SSD_KEY;
1049 if (sense_key != SSD_KEY_NO_SENSE) {
1050 saved_ccb->ccb_h.status |=
1051 CAM_AUTOSNS_VALID;
1052 } else {
1053 saved_ccb->ccb_h.status &=
1054 ~CAM_STATUS_MASK;
1055 saved_ccb->ccb_h.status |=
1056 CAM_AUTOSENSE_FAIL;
1057 }
1058 bcopy(saved_ccb, done_ccb, sizeof(union ccb));
1059 xpt_free_ccb(saved_ccb);
1060 break;
1061 }
1062 default:
1063 bcopy(saved_ccb, done_ccb, sizeof(union ccb));
1064 xpt_free_ccb(saved_ccb);
1065 done_ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1066 done_ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1067 break;
1068 }
1069 periph->flags &= ~CAM_PERIPH_SENSE_INPROG;
1070 /*
1071          * If this is the end of recovery, drop the freeze taken due to
1072          * the CAM_DEV_QFREEZE flag set on the recovery request.
1073 */
1074 if (depth == 0) {
1075 cam_release_devq(done_ccb->ccb_h.path,
1076 /*relsim_flags*/0,
1077 /*openings*/0,
1078 /*timeout*/0,
1079 /*getcount_only*/0);
1080 }
1081 /*
1082 * Copy frozen flag from recovery request if it is set there
1083 * for some reason.
1084 */
1085 if (frozen != 0)
1086 done_ccb->ccb_h.status |= CAM_DEV_QFRZN;
1087 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
1088 }
1089
1090 static void
1091 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1092 {
1093 union ccb *saved_ccb, *save_ccb;
1094 cam_status status;
1095 int frozen = 0;
1096 struct scsi_start_stop_unit *scsi_cmd;
1097 u_int32_t relsim_flags, timeout;
1098
1099 status = done_ccb->ccb_h.status;
1100 if (status & CAM_DEV_QFRZN) {
1101 frozen = 1;
1102 /*
1103          * Clear the freeze flag now in case of retry;
1104          * the freeze will be dropped later.
1105 */
1106 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1107 }
1108
1109 timeout = 0;
1110 relsim_flags = 0;
1111 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1112
1113 switch (status & CAM_STATUS_MASK) {
1114 case CAM_REQ_CMP:
1115 {
1116 /*
1117 * If we have successfully taken a device from the not
1118 * ready to ready state, re-scan the device and re-get
1119 * the inquiry information. Many devices (mostly disks)
1120 * don't properly report their inquiry information unless
1121 * they are spun up.
1122 */
1123 scsi_cmd = (struct scsi_start_stop_unit *)
1124 &done_ccb->csio.cdb_io.cdb_bytes;
1125
1126 if (scsi_cmd->opcode == START_STOP_UNIT)
1127 xpt_async(AC_INQ_CHANGED,
1128 done_ccb->ccb_h.path, NULL);
1129 goto final;
1130 }
1131 case CAM_SCSI_STATUS_ERROR:
1132 scsi_cmd = (struct scsi_start_stop_unit *)
1133 &done_ccb->csio.cdb_io.cdb_bytes;
1134 if (status & CAM_AUTOSNS_VALID) {
1135 struct ccb_getdev cgd;
1136 struct scsi_sense_data *sense;
1137 int error_code, sense_key, asc, ascq;
1138 scsi_sense_action err_action;
1139
1140 sense = &done_ccb->csio.sense_data;
1141 scsi_extract_sense(sense, &error_code,
1142 &sense_key, &asc, &ascq);
1143 /*
1144 * Grab the inquiry data for this device.
1145 */
1146 xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
1147 CAM_PRIORITY_NORMAL);
1148 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1149 xpt_action((union ccb *)&cgd);
1150 err_action = scsi_error_action(&done_ccb->csio,
1151 &cgd.inq_data, 0);
1152 /*
1153 * If the error is "invalid field in CDB",
1154 * and the load/eject flag is set, turn the
1155 * flag off and try again. This is just in
1156 * case the drive in question barfs on the
1157 * load eject flag. The CAM code should set
1158 * the load/eject flag by default for
1159 * removable media.
1160 */
1161 /* XXX KDM
1162 * Should we check to see what the specific
1163 * scsi status is?? Or does it not matter
1164 * since we already know that there was an
1165 * error, and we know what the specific
1166 * error code was, and we know what the
1167 * opcode is..
1168 */
1169 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1170 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1171 (asc == 0x24) && (ascq == 0x00) &&
1172 (done_ccb->ccb_h.retry_count > 0)) {
1173
1174 scsi_cmd->how &= ~SSS_LOEJ;
1175 xpt_action(done_ccb);
1176 } else if ((done_ccb->ccb_h.retry_count > 1)
1177 && ((err_action & SS_MASK) != SS_FAIL)) {
1178
1179 /*
1180 * In this case, the error recovery
1181 * command failed, but we've got
1182 * some retries left on it. Give
1183 * it another try unless this is an
1184 * unretryable error.
1185 */
1186 /* set the timeout to .5 sec */
1187 relsim_flags =
1188 RELSIM_RELEASE_AFTER_TIMEOUT;
1189 timeout = 500;
1190 xpt_action(done_ccb);
1191 break;
1192 } else {
1193 /*
1194 * Perform the final retry with the original
1195 * CCB so that final error processing is
1196 * performed by the owner of the CCB.
1197 */
1198 goto final;
1199 }
1200 } else {
1201 save_ccb = xpt_alloc_ccb_nowait();
1202 if (save_ccb == NULL)
1203 goto final;
1204 bcopy(done_ccb, save_ccb, sizeof(*save_ccb));
1205 periph->flags |= CAM_PERIPH_SENSE_INPROG;
1206 /*
1207 * Send a Request Sense to the device. We
1208 * assume that we are in a contingent allegiance
1209 * condition so we do not tag this request.
1210 */
1211 scsi_request_sense(&done_ccb->csio, /*retries*/1,
1212 camperiphsensedone,
1213 &save_ccb->csio.sense_data,
1214 sizeof(save_ccb->csio.sense_data),
1215 CAM_TAG_ACTION_NONE,
1216 /*sense_len*/SSD_FULL_SIZE,
1217 /*timeout*/5000);
1218 done_ccb->ccb_h.pinfo.priority--;
1219 done_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1220 done_ccb->ccb_h.saved_ccb_ptr = save_ccb;
1221 done_ccb->ccb_h.recovery_depth++;
1222 xpt_action(done_ccb);
1223 }
1224 break;
1225 default:
1226 final:
1227 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1228 xpt_free_ccb(saved_ccb);
1229 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1230 xpt_action(done_ccb);
1231 break;
1232 }
1233
1234 /* decrement the retry count */
1235 /*
1236 * XXX This isn't appropriate in all cases. Restructure,
1237 * so that the retry count is only decremented on an
1238          * actual retry. Remember that the original ccb had its
1239 * retry count dropped before entering recovery, so
1240 * doing it again is a bug.
1241 */
1242 if (done_ccb->ccb_h.retry_count > 0)
1243 done_ccb->ccb_h.retry_count--;
1244 /*
1245 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on recovery
1246 * request.
1247 */
1248 cam_release_devq(done_ccb->ccb_h.path,
1249 /*relsim_flags*/relsim_flags,
1250 /*openings*/0,
1251 /*timeout*/timeout,
1252 /*getcount_only*/0);
1253         /* Drop the freeze taken, if this recovery request got an error. */
1254 if (frozen != 0) {
1255 cam_release_devq(done_ccb->ccb_h.path,
1256 /*relsim_flags*/0,
1257 /*openings*/0,
1258 /*timeout*/0,
1259 /*getcount_only*/0);
1260 }
1261 }
1262
1263 /*
1264 * Generic Async Event handler. Peripheral drivers usually
1265 * filter out the events that require personal attention,
1266 * and leave the rest to this function.
1267 */
1268 void
1269 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1270 struct cam_path *path, void *arg)
1271 {
1272 switch (code) {
1273 case AC_LOST_DEVICE:
1274 cam_periph_invalidate(periph);
1275 break;
1276 default:
1277 break;
1278 }
1279 }
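/*
 * A sketch of the usual division of labor described above: the
 * driver's async callback handles the events it cares about and hands
 * everything else to cam_periph_async().  The "xx" names are
 * hypothetical; this is the xxasync() referenced in the
 * cam_periph_alloc() sketch earlier.
 */
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path,
        void *arg)
{
        struct cam_periph *periph = (struct cam_periph *)callback_arg;

        switch (code) {
        case AC_SENT_BDR:
        case AC_BUS_RESET:
                /* Driver-specific recovery, e.g. requeue in-flight I/O. */
                break;
        default:
                cam_periph_async(periph, code, path, arg);
                break;
        }
}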
1280
1281 void
1282 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1283 {
1284 struct ccb_getdevstats cgds;
1285
1286 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1287 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1288 xpt_action((union ccb *)&cgds);
1289 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1290 }
1291
1292 void
1293 cam_periph_freeze_after_event(struct cam_periph *periph,
1294 struct timeval* event_time, u_int duration_ms)
1295 {
1296 struct timeval delta;
1297 struct timeval duration_tv;
1298
1299 microtime(&delta);
1300 timevalsub(&delta, event_time);
1301 duration_tv.tv_sec = duration_ms / 1000;
1302 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1303 if (timevalcmp(&delta, &duration_tv, <)) {
1304 timevalsub(&duration_tv, &delta);
1305
1306 duration_ms = duration_tv.tv_sec * 1000;
1307 duration_ms += duration_tv.tv_usec / 1000;
1308 cam_freeze_devq(periph->path);
1309 cam_release_devq(periph->path,
1310 RELSIM_RELEASE_AFTER_TIMEOUT,
1311 /*reduction*/0,
1312 /*timeout*/duration_ms,
1313 /*getcount_only*/0);
1314 }
1315
1316 }
1317
1318 static int
1319 camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
1320 u_int32_t sense_flags,
1321 int *openings, u_int32_t *relsim_flags,
1322 u_int32_t *timeout, const char **action_string)
1323 {
1324 int error;
1325
1326 switch (ccb->csio.scsi_status) {
1327 case SCSI_STATUS_OK:
1328 case SCSI_STATUS_COND_MET:
1329 case SCSI_STATUS_INTERMED:
1330 case SCSI_STATUS_INTERMED_COND_MET:
1331 error = 0;
1332 break;
1333 case SCSI_STATUS_CMD_TERMINATED:
1334 case SCSI_STATUS_CHECK_COND:
1335 if (bootverbose)
1336 xpt_print(ccb->ccb_h.path, "SCSI status error\n");
1337 error = camperiphscsisenseerror(ccb,
1338 camflags,
1339 sense_flags,
1340 openings,
1341 relsim_flags,
1342 timeout,
1343 action_string);
1344 break;
1345 case SCSI_STATUS_QUEUE_FULL:
1346 {
1347 /* no decrement */
1348 struct ccb_getdevstats cgds;
1349
1350 /*
1351 * First off, find out what the current
1352 * transaction counts are.
1353 */
1354 xpt_setup_ccb(&cgds.ccb_h,
1355 ccb->ccb_h.path,
1356 CAM_PRIORITY_NORMAL);
1357 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1358 xpt_action((union ccb *)&cgds);
1359
1360 /*
1361 * If we were the only transaction active, treat
1362 * the QUEUE FULL as if it were a BUSY condition.
1363 */
1364 if (cgds.dev_active != 0) {
1365 int total_openings;
1366
1367 /*
1368 * Reduce the number of openings to
1369 * be 1 less than the amount it took
1370 * to get a queue full bounded by the
1371 * minimum allowed tag count for this
1372 * device.
1373 */
1374 total_openings = cgds.dev_active + cgds.dev_openings;
1375 *openings = cgds.dev_active;
1376 if (*openings < cgds.mintags)
1377 *openings = cgds.mintags;
1378 if (*openings < total_openings)
1379 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1380 else {
1381 /*
1382 * Some devices report queue full for
1383 * temporary resource shortages. For
1384 * this reason, we allow a minimum
1385 * tag count to be entered via a
1386 * quirk entry to prevent the queue
1387 * count on these devices from falling
1388                          * to a pessimistically low value. We
1389 * still wait for the next successful
1390 * completion, however, before queueing
1391 * more transactions to the device.
1392 */
1393 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1394 }
1395 *timeout = 0;
1396 error = ERESTART;
1397 if (bootverbose) {
1398 xpt_print(ccb->ccb_h.path, "Queue full\n");
1399 }
1400 break;
1401 }
1402 /* FALLTHROUGH */
1403 }
1404 case SCSI_STATUS_BUSY:
1405 /*
1406 * Restart the queue after either another
1407 * command completes or a 1 second timeout.
1408 */
1409 if (bootverbose) {
1410 xpt_print(ccb->ccb_h.path, "Device busy\n");
1411 }
1412 if (ccb->ccb_h.retry_count > 0) {
1413 ccb->ccb_h.retry_count--;
1414 error = ERESTART;
1415 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1416 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1417 *timeout = 1000;
1418 } else {
1419 error = EIO;
1420 }
1421 break;
1422 case SCSI_STATUS_RESERV_CONFLICT:
1423 xpt_print(ccb->ccb_h.path, "Reservation conflict\n");
1424 error = EIO;
1425 break;
1426 default:
1427 xpt_print(ccb->ccb_h.path, "SCSI status 0x%x\n",
1428 ccb->csio.scsi_status);
1429 error = EIO;
1430 break;
1431 }
1432 return (error);
1433 }
1434
1435 static int
1436 camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
1437 u_int32_t sense_flags,
1438 int *openings, u_int32_t *relsim_flags,
1439 u_int32_t *timeout, const char **action_string)
1440 {
1441 struct cam_periph *periph;
1442 union ccb *orig_ccb = ccb;
1443 int error;
1444
1445 periph = xpt_path_periph(ccb->ccb_h.path);
1446 if (periph->flags &
1447 (CAM_PERIPH_RECOVERY_INPROG | CAM_PERIPH_SENSE_INPROG)) {
1448 /*
1449 * If error recovery is already in progress, don't attempt
1450 * to process this error, but requeue it unconditionally
1451 * and attempt to process it once error recovery has
1452 * completed. This failed command is probably related to
1453 * the error that caused the currently active error recovery
1454 * action so our current recovery efforts should also
1455 * address this command. Be aware that the error recovery
1456 * code assumes that only one recovery action is in progress
1457 * on a particular peripheral instance at any given time
1458 * (e.g. only one saved CCB for error recovery) so it is
1459          * imperative that we don't violate this assumption.
1460 */
1461 error = ERESTART;
1462 } else {
1463 scsi_sense_action err_action;
1464 struct ccb_getdev cgd;
1465
1466 /*
1467 * Grab the inquiry data for this device.
1468 */
1469 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1470 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1471 xpt_action((union ccb *)&cgd);
1472
1473 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1474 err_action = scsi_error_action(&ccb->csio,
1475 &cgd.inq_data,
1476 sense_flags);
1477 else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1478 err_action = SS_REQSENSE;
1479 else
1480 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1481
1482 error = err_action & SS_ERRMASK;
1483
1484 /*
1485 * If the recovery action will consume a retry,
1486 * make sure we actually have retries available.
1487 */
1488 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1489 if (ccb->ccb_h.retry_count > 0)
1490 ccb->ccb_h.retry_count--;
1491 else {
1492 *action_string = "Retries exhausted";
1493 goto sense_error_done;
1494 }
1495 }
1496
1497 if ((err_action & SS_MASK) >= SS_START) {
1498 /*
1499 * Do common portions of commands that
1500 * use recovery CCBs.
1501 */
1502 orig_ccb = xpt_alloc_ccb_nowait();
1503 if (orig_ccb == NULL) {
1504 *action_string = "Can't allocate recovery CCB";
1505 goto sense_error_done;
1506 }
1507 /*
1508 * Clear freeze flag for original request here, as
1509 * this freeze will be dropped as part of ERESTART.
1510 */
1511 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1512 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1513 }
1514
1515 switch (err_action & SS_MASK) {
1516 case SS_NOP:
1517 *action_string = "No recovery action needed";
1518 error = 0;
1519 break;
1520 case SS_RETRY:
1521 *action_string = "Retrying command (per sense data)";
1522 error = ERESTART;
1523 break;
1524 case SS_FAIL:
1525 *action_string = "Unretryable error";
1526 break;
1527 case SS_START:
1528 {
1529 int le;
1530
1531 /*
1532 * Send a start unit command to the device, and
1533 * then retry the command.
1534 */
1535 *action_string = "Attempting to start unit";
1536 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1537
1538 /*
1539 * Check for removable media and set
1540 * load/eject flag appropriately.
1541 */
1542 if (SID_IS_REMOVABLE(&cgd.inq_data))
1543 le = TRUE;
1544 else
1545 le = FALSE;
1546
1547 scsi_start_stop(&ccb->csio,
1548 /*retries*/1,
1549 camperiphdone,
1550 MSG_SIMPLE_Q_TAG,
1551 /*start*/TRUE,
1552 /*load/eject*/le,
1553 /*immediate*/FALSE,
1554 SSD_FULL_SIZE,
1555 /*timeout*/50000);
1556 break;
1557 }
1558 case SS_TUR:
1559 {
1560 /*
1561 * Send a Test Unit Ready to the device.
1562 * If the 'many' flag is set, we send 120
1563 * test unit ready commands, one every half
1564 * second. Otherwise, we just send one TUR.
1565 * We only want to do this if the retry
1566 * count has not been exhausted.
1567 */
1568 int retries;
1569
1570 if ((err_action & SSQ_MANY) != 0) {
1571 *action_string = "Polling device for readiness";
1572 retries = 120;
1573 } else {
1574 *action_string = "Testing device for readiness";
1575 retries = 1;
1576 }
1577 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1578 scsi_test_unit_ready(&ccb->csio,
1579 retries,
1580 camperiphdone,
1581 MSG_SIMPLE_Q_TAG,
1582 SSD_FULL_SIZE,
1583 /*timeout*/5000);
1584
1585 /*
1586 * Accomplish our 500ms delay by deferring
1587 * the release of our device queue appropriately.
1588 */
1589 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1590 *timeout = 500;
1591 break;
1592 }
1593 case SS_REQSENSE:
1594 {
1595 *action_string = "Requesting SCSI sense data";
1596 periph->flags |= CAM_PERIPH_SENSE_INPROG;
1597 /*
1598 * Send a Request Sense to the device. We
1599 * assume that we are in a contingent allegiance
1600 * condition so we do not tag this request.
1601 */
1602 scsi_request_sense(&ccb->csio, /*retries*/1,
1603 camperiphsensedone,
1604 &orig_ccb->csio.sense_data,
1605 sizeof(orig_ccb->csio.sense_data),
1606 CAM_TAG_ACTION_NONE,
1607 /*sense_len*/SSD_FULL_SIZE,
1608 /*timeout*/5000);
1609 break;
1610 }
1611 default:
1612 panic("Unhandled error action %x", err_action);
1613 }
1614
1615 if ((err_action & SS_MASK) >= SS_START) {
1616 /*
1617 * Drop the priority, so that the recovery
1618 * CCB is the first to execute. Freeze the queue
1619 * after this command is sent so that we can
1620 * restore the old csio and have it queued in
1621 * the proper order before we release normal
1622 * transactions to the device.
1623 */
1624 ccb->ccb_h.pinfo.priority--;
1625 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1626 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1627 ccb->ccb_h.recovery_depth = 0;
1628 error = ERESTART;
1629 }
1630
1631 sense_error_done:
1632 if ((err_action & SSQ_PRINT_SENSE) != 0
1633 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1634 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1635 }
1636 return (error);
1637 }
1638
1639 /*
1640 * Generic error handler. Peripheral drivers usually filter
1641  * out the errors that they handle in a unique manner, then
1642 * call this function.
1643 */
1644 int
1645 cam_periph_error(union ccb *ccb, cam_flags camflags,
1646 u_int32_t sense_flags, union ccb *save_ccb)
1647 {
1648 const char *action_string;
1649 cam_status status;
1650 int frozen;
1651 int error, printed = 0;
1652 int openings;
1653 u_int32_t relsim_flags;
1654 u_int32_t timeout = 0;
1655
1656 action_string = NULL;
1657 status = ccb->ccb_h.status;
1658 frozen = (status & CAM_DEV_QFRZN) != 0;
1659 status &= CAM_STATUS_MASK;
1660 openings = relsim_flags = 0;
1661
1662 switch (status) {
1663 case CAM_REQ_CMP:
1664 error = 0;
1665 break;
1666 case CAM_SCSI_STATUS_ERROR:
1667 error = camperiphscsistatuserror(ccb,
1668 camflags,
1669 sense_flags,
1670 &openings,
1671 &relsim_flags,
1672 &timeout,
1673 &action_string);
1674 break;
1675 case CAM_AUTOSENSE_FAIL:
1676 xpt_print(ccb->ccb_h.path, "AutoSense failed\n");
1677 error = EIO; /* we have to kill the command */
1678 break;
1679 case CAM_ATA_STATUS_ERROR:
1680 if (bootverbose && printed == 0) {
1681 xpt_print(ccb->ccb_h.path, "ATA status error\n");
1682 cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1683 printed++;
1684 }
1685 /* FALLTHROUGH */
1686 case CAM_REQ_CMP_ERR:
1687 if (bootverbose && printed == 0) {
1688 xpt_print(ccb->ccb_h.path,
1689 "Request completed with CAM_REQ_CMP_ERR\n");
1690 printed++;
1691 }
1692 /* FALLTHROUGH */
1693 case CAM_CMD_TIMEOUT:
1694 if (bootverbose && printed == 0) {
1695 xpt_print(ccb->ccb_h.path, "Command timed out\n");
1696 printed++;
1697 }
1698 /* FALLTHROUGH */
1699 case CAM_UNEXP_BUSFREE:
1700 if (bootverbose && printed == 0) {
1701 xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
1702 printed++;
1703 }
1704 /* FALLTHROUGH */
1705 case CAM_UNCOR_PARITY:
1706 if (bootverbose && printed == 0) {
1707 xpt_print(ccb->ccb_h.path,
1708 "Uncorrected parity error\n");
1709 printed++;
1710 }
1711 /* FALLTHROUGH */
1712 case CAM_DATA_RUN_ERR:
1713 if (bootverbose && printed == 0) {
1714 xpt_print(ccb->ccb_h.path, "Data overrun\n");
1715 printed++;
1716 }
1717 error = EIO; /* we have to kill the command */
1718 /* decrement the number of retries */
1719 if (ccb->ccb_h.retry_count > 0) {
1720 ccb->ccb_h.retry_count--;
1721 error = ERESTART;
1722 } else {
1723 action_string = "Retries exhausted";
1724 error = EIO;
1725 }
1726 break;
1727 case CAM_UA_ABORT:
1728 case CAM_UA_TERMIO:
1729 case CAM_MSG_REJECT_REC:
1730 /* XXX Don't know that these are correct */
1731 error = EIO;
1732 break;
1733 case CAM_SEL_TIMEOUT:
1734 {
1735 struct cam_path *newpath;
1736
1737 if ((camflags & CAM_RETRY_SELTO) != 0) {
1738 if (ccb->ccb_h.retry_count > 0) {
1739
1740 ccb->ccb_h.retry_count--;
1741 error = ERESTART;
1742 if (bootverbose && printed == 0) {
1743 xpt_print(ccb->ccb_h.path,
1744 "Selection timeout\n");
1745 printed++;
1746 }
1747
1748 /*
1749 * Wait a bit to give the device
1750 * time to recover before we try again.
1751 */
1752 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1753 timeout = periph_selto_delay;
1754 break;
1755 }
1756 }
1757 error = ENXIO;
1758 /* Should we do more if we can't create the path?? */
1759 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1760 xpt_path_path_id(ccb->ccb_h.path),
1761 xpt_path_target_id(ccb->ccb_h.path),
1762 CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1763 break;
1764
1765 /*
1766 * Let peripheral drivers know that this device has gone
1767 * away.
1768 */
1769 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1770 xpt_free_path(newpath);
1771 break;
1772 }
1773 case CAM_REQ_INVALID:
1774 case CAM_PATH_INVALID:
1775 case CAM_DEV_NOT_THERE:
1776 case CAM_NO_HBA:
1777 case CAM_PROVIDE_FAIL:
1778 case CAM_REQ_TOO_BIG:
1779 case CAM_LUN_INVALID:
1780 case CAM_TID_INVALID:
1781 error = EINVAL;
1782 break;
1783 case CAM_SCSI_BUS_RESET:
1784 case CAM_BDR_SENT:
1785 /*
1786          * Commands that repeatedly time out and cause these
1787          * kinds of error recovery actions should return
1788 * CAM_CMD_TIMEOUT, which allows us to safely assume
1789 * that this command was an innocent bystander to
1790 * these events and should be unconditionally
1791 * retried.
1792 */
1793 if (bootverbose && printed == 0) {
1794 xpt_print_path(ccb->ccb_h.path);
1795 if (status == CAM_BDR_SENT)
1796 printf("Bus Device Reset sent\n");
1797 else
1798 printf("Bus Reset issued\n");
1799 printed++;
1800 }
1801 /* FALLTHROUGH */
1802 case CAM_REQUEUE_REQ:
1803 /* Unconditional requeue */
1804 error = ERESTART;
1805 if (bootverbose && printed == 0) {
1806 xpt_print(ccb->ccb_h.path, "Request requeued\n");
1807 printed++;
1808 }
1809 break;
1810 case CAM_RESRC_UNAVAIL:
1811 /* Wait a bit for the resource shortage to abate. */
1812 timeout = periph_noresrc_delay;
1813 /* FALLTHROUGH */
1814 case CAM_BUSY:
1815 if (timeout == 0) {
1816 /* Wait a bit for the busy condition to abate. */
1817 timeout = periph_busy_delay;
1818 }
1819 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1820 /* FALLTHROUGH */
1821 default:
1822 /* decrement the number of retries */
1823 if (ccb->ccb_h.retry_count > 0) {
1824 ccb->ccb_h.retry_count--;
1825 error = ERESTART;
1826 if (bootverbose && printed == 0) {
1827 xpt_print(ccb->ccb_h.path, "CAM status 0x%x\n",
1828 status);
1829 printed++;
1830 }
1831 } else {
1832 error = EIO;
1833 action_string = "Retries exhausted";
1834 }
1835 break;
1836 }
1837
1838 /*
1839  * If we have an error and are booting verbosely, whine
1840 * *unless* this was a non-retryable selection timeout.
1841 */
1842 if (error != 0 && bootverbose &&
1843 !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
1844 if (error != ERESTART) {
1845 if (action_string == NULL)
1846 action_string = "Unretryable error";
1847 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1848 error, action_string);
1849 } else if (action_string != NULL)
1850 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1851 else
1852 xpt_print(ccb->ccb_h.path, "Retrying command\n");
1853 }
1854
1855 /* Attempt a retry */
1856 if (error == ERESTART || error == 0) {
1857 if (frozen != 0)
1858 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1859 if (error == ERESTART)
1860 xpt_action(ccb);
1861 if (frozen != 0)
1862 cam_release_devq(ccb->ccb_h.path,
1863 relsim_flags,
1864 openings,
1865 timeout,
1866 /*getcount_only*/0);
1867 }
1868
1869 return (error);
1870 }
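/*
 * The xxerror() routine referenced in the cam_periph_runccb() sketch
 * earlier: drivers conventionally filter the errors they handle
 * themselves and then delegate to cam_periph_error().  Nothing is
 * filtered in this minimal version; the save_ccb argument is unused by
 * the implementation above, so NULL is passed.
 */
static int
xxerror(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
{
        return (cam_periph_error(ccb, camflags, sense_flags,
            /*save_ccb*/NULL));
}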