FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_periph.c
1 /*-
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/11.2/sys/cam/cam_periph.c 328680 2018-02-01 16:35:40Z mav $");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/bio.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/buf.h>
42 #include <sys/proc.h>
43 #include <sys/devicestat.h>
44 #include <sys/bus.h>
45 #include <sys/sbuf.h>
46 #include <vm/vm.h>
47 #include <vm/vm_extern.h>
48
49 #include <cam/cam.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_xpt_periph.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_debug.h>
55 #include <cam/cam_sim.h>
56
57 #include <cam/scsi/scsi_all.h>
58 #include <cam/scsi/scsi_message.h>
59 #include <cam/scsi/scsi_pass.h>
60
61 static u_int camperiphnextunit(struct periph_driver *p_drv,
62 u_int newunit, int wired,
63 path_id_t pathid, target_id_t target,
64 lun_id_t lun);
65 static u_int camperiphunit(struct periph_driver *p_drv,
66 path_id_t pathid, target_id_t target,
67 lun_id_t lun);
68 static void camperiphdone(struct cam_periph *periph,
69 union ccb *done_ccb);
70 static void camperiphfree(struct cam_periph *periph);
71 static int camperiphscsistatuserror(union ccb *ccb,
72 union ccb **orig_ccb,
73 cam_flags camflags,
74 u_int32_t sense_flags,
75 int *openings,
76 u_int32_t *relsim_flags,
77 u_int32_t *timeout,
78 u_int32_t *action,
79 const char **action_string);
80 static int camperiphscsisenseerror(union ccb *ccb,
81 union ccb **orig_ccb,
82 cam_flags camflags,
83 u_int32_t sense_flags,
84 int *openings,
85 u_int32_t *relsim_flags,
86 u_int32_t *timeout,
87 u_int32_t *action,
88 const char **action_string);
89 static void cam_periph_devctl_notify(union ccb *ccb);
90
91 static int nperiph_drivers;
92 static int initialized = 0;
93 struct periph_driver **periph_drivers;
94
95 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
96
97 static int periph_selto_delay = 1000;
98 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
99 static int periph_noresrc_delay = 500;
100 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
101 static int periph_busy_delay = 500;
102 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
103
104
105 void
106 periphdriver_register(void *data)
107 {
108 struct periph_driver *drv = (struct periph_driver *)data;
109 struct periph_driver **newdrivers, **old;
110 int ndrivers;
111
112 again:
113 ndrivers = nperiph_drivers + 2;
114 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
115 M_WAITOK);
116 xpt_lock_buses();
117 if (ndrivers != nperiph_drivers + 2) {
118 /*
119 * Lost race against itself; go around.
120 */
121 xpt_unlock_buses();
122 free(newdrivers, M_CAMPERIPH);
123 goto again;
124 }
125 if (periph_drivers)
126 bcopy(periph_drivers, newdrivers,
127 sizeof(*newdrivers) * nperiph_drivers);
128 newdrivers[nperiph_drivers] = drv;
129 newdrivers[nperiph_drivers + 1] = NULL;
130 old = periph_drivers;
131 periph_drivers = newdrivers;
132 nperiph_drivers++;
133 xpt_unlock_buses();
134 if (old)
135 free(old, M_CAMPERIPH);
136 /* If driver marked as early or it is late now, initialize it. */
137 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
138 initialized > 1)
139 (*drv->init)();
140 }
141
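/*
 * Example (editorial sketch): drivers normally do not call
 * periphdriver_register() directly; they declare a statically
 * initialized periph_driver and let the PERIPHDRIVER_DECLARE() macro
 * from cam_periph.h register it via SYSINIT.  The "xx" name and the
 * xxinit routine below are hypothetical, so the fragment is guarded out.
 */
#if 0	/* illustrative only */
static struct periph_driver xxdriver = {
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};
PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif
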
142 int
143 periphdriver_unregister(void *data)
144 {
145 struct periph_driver *drv = (struct periph_driver *)data;
146 int error, n;
147
148 /* If driver marked as early or it is late now, deinitialize it. */
149 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
150 initialized > 1) {
151 if (drv->deinit == NULL) {
152 printf("CAM periph driver '%s' doesn't have deinit.\n",
153 drv->driver_name);
154 return (EOPNOTSUPP);
155 }
156 error = drv->deinit();
157 if (error != 0)
158 return (error);
159 }
160
161 xpt_lock_buses();
162 for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
163 ;
164 KASSERT(n < nperiph_drivers,
165 ("Periph driver '%s' was not registered", drv->driver_name));
166 for (; n + 1 < nperiph_drivers; n++)
167 periph_drivers[n] = periph_drivers[n + 1];
168 periph_drivers[n] = NULL; /* clear the now-stale last slot */
169 nperiph_drivers--;
170 xpt_unlock_buses();
171 return (0);
172 }
173
174 void
175 periphdriver_init(int level)
176 {
177 int i, early;
178
179 initialized = max(initialized, level);
180 for (i = 0; periph_drivers[i] != NULL; i++) {
181 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
182 if (early == initialized)
183 (*periph_drivers[i]->init)();
184 }
185 }
186
187 cam_status
188 cam_periph_alloc(periph_ctor_t *periph_ctor,
189 periph_oninv_t *periph_oninvalidate,
190 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
191 char *name, cam_periph_type type, struct cam_path *path,
192 ac_callback_t *ac_callback, ac_code code, void *arg)
193 {
194 struct periph_driver **p_drv;
195 struct cam_sim *sim;
196 struct cam_periph *periph;
197 struct cam_periph *cur_periph;
198 path_id_t path_id;
199 target_id_t target_id;
200 lun_id_t lun_id;
201 cam_status status;
202 u_int init_level;
203
204 init_level = 0;
205 /*
206 * Handle Hot-Plug scenarios. If there is already a peripheral
207 * of our type assigned to this path, we are likely waiting for
208 * final close on an old, invalidated peripheral. If this is
209 * the case, queue up a deferred call to the peripheral's async
210 * handler. If it looks like a mistaken re-allocation, complain.
211 */
212 if ((periph = cam_periph_find(path, name)) != NULL) {
213
214 if ((periph->flags & CAM_PERIPH_INVALID) != 0
215 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
216 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
217 periph->deferred_callback = ac_callback;
218 periph->deferred_ac = code;
219 return (CAM_REQ_INPROG);
220 } else {
221 printf("cam_periph_alloc: attempt to re-allocate "
222 "valid device %s%d rejected flags %#x "
223 "refcount %d\n", periph->periph_name,
224 periph->unit_number, periph->flags,
225 periph->refcount);
226 }
227 return (CAM_REQ_INVALID);
228 }
229
230 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
231 M_NOWAIT|M_ZERO);
232
233 if (periph == NULL)
234 return (CAM_RESRC_UNAVAIL);
235
236 init_level++;
237
238
239 sim = xpt_path_sim(path);
240 path_id = xpt_path_path_id(path);
241 target_id = xpt_path_target_id(path);
242 lun_id = xpt_path_lun_id(path);
243 periph->periph_start = periph_start;
244 periph->periph_dtor = periph_dtor;
245 periph->periph_oninval = periph_oninvalidate;
246 periph->type = type;
247 periph->periph_name = name;
248 periph->scheduled_priority = CAM_PRIORITY_NONE;
249 periph->immediate_priority = CAM_PRIORITY_NONE;
250 periph->refcount = 1; /* Dropped by invalidation. */
251 periph->sim = sim;
252 SLIST_INIT(&periph->ccb_list);
253 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
254 if (status != CAM_REQ_CMP)
255 goto failure;
256 periph->path = path;
257
258 xpt_lock_buses();
259 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
260 if (strcmp((*p_drv)->driver_name, name) == 0)
261 break;
262 }
263 if (*p_drv == NULL) {
264 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
265 xpt_unlock_buses();
266 xpt_free_path(periph->path);
267 free(periph, M_CAMPERIPH);
268 return (CAM_REQ_INVALID);
269 }
270 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
271 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
272 while (cur_periph != NULL
273 && cur_periph->unit_number < periph->unit_number)
274 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
275 if (cur_periph != NULL) {
276 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
277 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
278 } else {
279 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
280 (*p_drv)->generation++;
281 }
282 xpt_unlock_buses();
283
284 init_level++;
285
286 status = xpt_add_periph(periph);
287 if (status != CAM_REQ_CMP)
288 goto failure;
289
290 init_level++;
291 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
292
293 status = periph_ctor(periph, arg);
294
295 if (status == CAM_REQ_CMP)
296 init_level++;
297
298 failure:
299 switch (init_level) {
300 case 4:
301 /* Initialized successfully */
302 break;
303 case 3:
304 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
305 xpt_remove_periph(periph);
306 /* FALLTHROUGH */
307 case 2:
308 xpt_lock_buses();
309 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
310 xpt_unlock_buses();
311 xpt_free_path(periph->path);
312 /* FALLTHROUGH */
313 case 1:
314 free(periph, M_CAMPERIPH);
315 /* FALLTHROUGH */
316 case 0:
317 /* No cleanup to perform. */
318 break;
319 default:
320 panic("%s: Unknown init level", __func__);
321 }
322 return(status);
323 }
324
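/*
 * Example (editorial sketch): cam_periph_alloc() is normally invoked
 * from a driver's async callback when a new device is reported.  The
 * xx* symbols are hypothetical, so the fragment is guarded out.
 */
#if 0	/* illustrative only */
static void
xxasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	cam_status status;

	if (code != AC_FOUND_DEVICE)
		return;
	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync, AC_FOUND_DEVICE,
	    arg);
	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
		printf("xxasync: unable to attach, status %#x\n", status);
}
#endif
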
325 /*
326 * Find a peripheral structure with the specified path, target, lun,
327 * and (optionally) driver name. If the name is NULL, this function will return
328 * the first peripheral driver that matches the specified path.
329 */
330 struct cam_periph *
331 cam_periph_find(struct cam_path *path, char *name)
332 {
333 struct periph_driver **p_drv;
334 struct cam_periph *periph;
335
336 xpt_lock_buses();
337 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
338
339 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
340 continue;
341
342 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
343 if (xpt_path_comp(periph->path, path) == 0) {
344 xpt_unlock_buses();
345 cam_periph_assert(periph, MA_OWNED);
346 return(periph);
347 }
348 }
349 if (name != NULL) {
350 xpt_unlock_buses();
351 return(NULL);
352 }
353 }
354 xpt_unlock_buses();
355 return(NULL);
356 }
357
358 /*
359 * Find peripheral driver instances attached to the specified path.
360 */
361 int
362 cam_periph_list(struct cam_path *path, struct sbuf *sb)
363 {
364 struct sbuf local_sb;
365 struct periph_driver **p_drv;
366 struct cam_periph *periph;
367 int count;
368 int sbuf_alloc_len;
369
370 sbuf_alloc_len = 16;
371 retry:
372 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
373 count = 0;
374 xpt_lock_buses();
375 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
376
377 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
378 if (xpt_path_comp(periph->path, path) != 0)
379 continue;
380
381 if (sbuf_len(&local_sb) != 0)
382 sbuf_cat(&local_sb, ",");
383
384 sbuf_printf(&local_sb, "%s%d", periph->periph_name,
385 periph->unit_number);
386
387 if (sbuf_error(&local_sb) == ENOMEM) {
388 sbuf_alloc_len *= 2;
389 xpt_unlock_buses();
390 sbuf_delete(&local_sb);
391 goto retry;
392 }
393 count++;
394 }
395 }
396 xpt_unlock_buses();
397 sbuf_finish(&local_sb);
398 sbuf_cpy(sb, sbuf_data(&local_sb));
399 sbuf_delete(&local_sb);
400 return (count);
401 }
402
403 cam_status
404 cam_periph_acquire(struct cam_periph *periph)
405 {
406 cam_status status;
407
408 status = CAM_REQ_CMP_ERR;
409 if (periph == NULL)
410 return (status);
411
412 xpt_lock_buses();
413 if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
414 periph->refcount++;
415 status = CAM_REQ_CMP;
416 }
417 xpt_unlock_buses();
418
419 return (status);
420 }
421
422 void
423 cam_periph_doacquire(struct cam_periph *periph)
424 {
425
426 xpt_lock_buses();
427 KASSERT(periph->refcount >= 1,
428 ("cam_periph_doacquire() with refcount == %d", periph->refcount));
429 periph->refcount++;
430 xpt_unlock_buses();
431 }
432
433 void
434 cam_periph_release_locked_buses(struct cam_periph *periph)
435 {
436
437 cam_periph_assert(periph, MA_OWNED);
438 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
439 if (--periph->refcount == 0)
440 camperiphfree(periph);
441 }
442
443 void
444 cam_periph_release_locked(struct cam_periph *periph)
445 {
446
447 if (periph == NULL)
448 return;
449
450 xpt_lock_buses();
451 cam_periph_release_locked_buses(periph);
452 xpt_unlock_buses();
453 }
454
455 void
456 cam_periph_release(struct cam_periph *periph)
457 {
458 struct mtx *mtx;
459
460 if (periph == NULL)
461 return;
462
463 cam_periph_assert(periph, MA_NOTOWNED);
464 mtx = cam_periph_mtx(periph);
465 mtx_lock(mtx);
466 cam_periph_release_locked(periph);
467 mtx_unlock(mtx);
468 }
469
470 int
471 cam_periph_hold(struct cam_periph *periph, int priority)
472 {
473 int error;
474
475 /*
476 * Increment the reference count on the peripheral
477 * while we wait for our lock attempt to succeed
478 * to ensure the peripheral doesn't disappear out
479 * from under us while we sleep.
480 */
481
482 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
483 return (ENXIO);
484
485 cam_periph_assert(periph, MA_OWNED);
486 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
487 periph->flags |= CAM_PERIPH_LOCK_WANTED;
488 if ((error = cam_periph_sleep(periph, periph, priority,
489 "caplck", 0)) != 0) {
490 cam_periph_release_locked(periph);
491 return (error);
492 }
493 if (periph->flags & CAM_PERIPH_INVALID) {
494 cam_periph_release_locked(periph);
495 return (ENXIO);
496 }
497 }
498
499 periph->flags |= CAM_PERIPH_LOCKED;
500 return (0);
501 }
502
503 void
504 cam_periph_unhold(struct cam_periph *periph)
505 {
506
507 cam_periph_assert(periph, MA_OWNED);
508
509 periph->flags &= ~CAM_PERIPH_LOCKED;
510 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
511 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
512 wakeup(periph);
513 }
514
515 cam_periph_release_locked(periph);
516 }
517
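/*
 * Example (editorial sketch): open paths commonly bracket their
 * configuration work with cam_periph_hold()/cam_periph_unhold() while
 * the peripheral mutex is held.  Note that cam_periph_hold() takes its
 * own reference and releases it itself on failure, while
 * cam_periph_unhold() drops it on the success path.  Fragment guarded
 * out; "periph" and "error" come from the surrounding (hypothetical)
 * open routine.
 */
#if 0	/* illustrative only */
	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
		cam_periph_unlock(periph);
		return (error);
	}
	/* ... driver-specific open work ... */
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
#endif
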
518 /*
519 * Look for the next unit number that is not currently in use for this
520 * peripheral type starting at "newunit". Also exclude unit numbers that
521 * are reserved for future "hardwiring" unless we already know that this
522 * is a potential wired device. Only assume that the device is "wired" the
523 * first time through the loop since after that we'll be looking at unit
524 * numbers that did not match a wiring entry.
525 */
526 static u_int
527 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
528 path_id_t pathid, target_id_t target, lun_id_t lun)
529 {
530 struct cam_periph *periph;
531 char *periph_name;
532 int i, val, dunit, r;
533 const char *dname, *strval;
534
535 periph_name = p_drv->driver_name;
536 for (;;newunit++) {
537
538 for (periph = TAILQ_FIRST(&p_drv->units);
539 periph != NULL && periph->unit_number != newunit;
540 periph = TAILQ_NEXT(periph, unit_links))
541 ;
542
543 if (periph != NULL && periph->unit_number == newunit) {
544 if (wired != 0) {
545 xpt_print(periph->path, "Duplicate Wired "
546 "Device entry!\n");
547 xpt_print(periph->path, "Second device (%s "
548 "device at scbus%d target %d lun %d) will "
549 "not be wired\n", periph_name, pathid,
550 target, lun);
551 wired = 0;
552 }
553 continue;
554 }
555 if (wired)
556 break;
557
558 /*
559 * Don't match entries like "da 4" as a wired down
560 * device, but do match entries like "da 4 target 5"
561 * or even "da 4 scbus 1".
562 */
563 i = 0;
564 dname = periph_name;
565 for (;;) {
566 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
567 if (r != 0)
568 break;
569 /* if no "target" and no specific scbus, skip */
570 if (resource_int_value(dname, dunit, "target", &val) &&
571 (resource_string_value(dname, dunit, "at",&strval)||
572 strcmp(strval, "scbus") == 0))
573 continue;
574 if (newunit == dunit)
575 break;
576 }
577 if (r != 0)
578 break;
579 }
580 return (newunit);
581 }
582
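/*
 * Example (editorial sketch): the "at"/"target"/"lun" wiring entries
 * consulted by this unit-number logic come from the kernel environment,
 * typically /boot/device.hints.  Reserving unit da4 for the device at
 * scbus1 target 5 lun 0 (all values illustrative) would look like:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */
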
583 static u_int
584 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
585 target_id_t target, lun_id_t lun)
586 {
587 u_int unit;
588 int wired, i, val, dunit;
589 const char *dname, *strval;
590 char pathbuf[32], *periph_name;
591
592 periph_name = p_drv->driver_name;
593 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
594 unit = 0;
595 i = 0;
596 dname = periph_name;
597 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
598 wired = 0) {
599 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
600 if (strcmp(strval, pathbuf) != 0)
601 continue;
602 wired++;
603 }
604 if (resource_int_value(dname, dunit, "target", &val) == 0) {
605 if (val != target)
606 continue;
607 wired++;
608 }
609 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
610 if (val != lun)
611 continue;
612 wired++;
613 }
614 if (wired != 0) {
615 unit = dunit;
616 break;
617 }
618 }
619
620 /*
621 * Either start from 0 looking for the next unit or from
622 * the unit number given in the resource config. This way,
623 * if we have wildcard matches, we don't return the same
624 * unit number twice.
625 */
626 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
627
628 return (unit);
629 }
630
631 void
632 cam_periph_invalidate(struct cam_periph *periph)
633 {
634
635 cam_periph_assert(periph, MA_OWNED);
636 /*
637 * We only call this routine the first time a peripheral is
638 * invalidated.
639 */
640 if ((periph->flags & CAM_PERIPH_INVALID) != 0)
641 return;
642
643 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
644 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
645 xpt_denounce_periph(periph);
646 periph->flags |= CAM_PERIPH_INVALID;
647 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
648 if (periph->periph_oninval != NULL)
649 periph->periph_oninval(periph);
650 cam_periph_release_locked(periph);
651 }
652
653 static void
654 camperiphfree(struct cam_periph *periph)
655 {
656 struct periph_driver **p_drv;
657 struct periph_driver *drv;
658
659 cam_periph_assert(periph, MA_OWNED);
660 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
661 periph->periph_name, periph->unit_number));
662 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
663 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
664 break;
665 }
666 if (*p_drv == NULL) {
667 printf("camperiphfree: attempt to free non-existent periph\n");
668 return;
669 }
670 /*
671 * Cache a pointer to the periph_driver structure. If a
672 * periph_driver is added or removed from the array (see
673 * periphdriver_register()) while we drop the topology lock
674 * below, p_drv may change. This doesn't protect against this
675 * particular periph_driver going away. That will require full
676 * reference counting in the periph_driver infrastructure.
677 */
678 drv = *p_drv;
679
680 /*
681 * We need to set this flag before dropping the topology lock, to
682 * let anyone who is traversing the list know that this peripheral
683 * is about to be freed, and that there will be no more reference
684 * count checks.
685 */
686 periph->flags |= CAM_PERIPH_FREE;
687
688 /*
689 * The peripheral destructor semantics dictate calling with only the
690 * SIM mutex held. Since it might sleep, it should not be called
691 * with the topology lock held.
692 */
693 xpt_unlock_buses();
694
695 /*
696 * We need to call the peripheral destructor prior to removing the
697 * peripheral from the list. Otherwise, we risk running into a
698 * scenario where the peripheral unit number may get reused
699 * (because it has been removed from the list), but some resources
700 * used by the peripheral are still hanging around. In particular,
701 * the devfs nodes used by some peripherals like the pass(4) driver
702 * aren't fully cleaned up until the destructor is run. If the
703 * unit number is reused before the devfs instance is fully gone,
704 * devfs will panic.
705 */
706 if (periph->periph_dtor != NULL)
707 periph->periph_dtor(periph);
708
709 /*
710 * The peripheral list is protected by the topology lock.
711 */
712 xpt_lock_buses();
713
714 TAILQ_REMOVE(&drv->units, periph, unit_links);
715 drv->generation++;
716
717 xpt_remove_periph(periph);
718
719 xpt_unlock_buses();
720 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
721 xpt_print(periph->path, "Periph destroyed\n");
722 else
723 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
724
725 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
726 union ccb ccb;
727 void *arg;
728
729 switch (periph->deferred_ac) {
730 case AC_FOUND_DEVICE:
731 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
732 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
733 xpt_action(&ccb);
734 arg = &ccb;
735 break;
736 case AC_PATH_REGISTERED:
737 ccb.ccb_h.func_code = XPT_PATH_INQ;
738 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
739 xpt_action(&ccb);
740 arg = &ccb;
741 break;
742 default:
743 arg = NULL;
744 break;
745 }
746 periph->deferred_callback(NULL, periph->deferred_ac,
747 periph->path, arg);
748 }
749 xpt_free_path(periph->path);
750 free(periph, M_CAMPERIPH);
751 xpt_lock_buses();
752 }
753
754 /*
755 * Map user virtual pointers into kernel virtual address space, so we can
756 * access the memory. This is now a generic function that centralizes most
757 * of the sanity checks on the data flags, if any.
758 * This also only works for up to MAXPHYS memory. Since we use
759 * buffers to map stuff in and out, we're limited to the buffer size.
760 */
761 int
762 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
763 u_int maxmap)
764 {
765 int numbufs, i, j;
766 int flags[CAM_PERIPH_MAXMAPS];
767 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
768 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
769 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
770
771 if (maxmap == 0)
772 maxmap = DFLTPHYS; /* traditional default */
773 else if (maxmap > MAXPHYS)
774 maxmap = MAXPHYS; /* for safety */
775 switch(ccb->ccb_h.func_code) {
776 case XPT_DEV_MATCH:
777 if (ccb->cdm.match_buf_len == 0) {
778 printf("cam_periph_mapmem: invalid match buffer "
779 "length 0\n");
780 return(EINVAL);
781 }
782 if (ccb->cdm.pattern_buf_len > 0) {
783 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
784 lengths[0] = ccb->cdm.pattern_buf_len;
785 dirs[0] = CAM_DIR_OUT;
786 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
787 lengths[1] = ccb->cdm.match_buf_len;
788 dirs[1] = CAM_DIR_IN;
789 numbufs = 2;
790 } else {
791 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
792 lengths[0] = ccb->cdm.match_buf_len;
793 dirs[0] = CAM_DIR_IN;
794 numbufs = 1;
795 }
796 /*
797 * This request will not go to the hardware, no reason
798 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
799 */
800 maxmap = MAXPHYS;
801 break;
802 case XPT_SCSI_IO:
803 case XPT_CONT_TARGET_IO:
804 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
805 return(0);
806 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
807 return (EINVAL);
808 data_ptrs[0] = &ccb->csio.data_ptr;
809 lengths[0] = ccb->csio.dxfer_len;
810 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
811 numbufs = 1;
812 break;
813 case XPT_ATA_IO:
814 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
815 return(0);
816 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
817 return (EINVAL);
818 data_ptrs[0] = &ccb->ataio.data_ptr;
819 lengths[0] = ccb->ataio.dxfer_len;
820 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
821 numbufs = 1;
822 break;
823 case XPT_SMP_IO:
824 data_ptrs[0] = &ccb->smpio.smp_request;
825 lengths[0] = ccb->smpio.smp_request_len;
826 dirs[0] = CAM_DIR_OUT;
827 data_ptrs[1] = &ccb->smpio.smp_response;
828 lengths[1] = ccb->smpio.smp_response_len;
829 dirs[1] = CAM_DIR_IN;
830 numbufs = 2;
831 break;
832 case XPT_NVME_IO:
833 case XPT_NVME_ADMIN:
834 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
835 return (0);
836 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
837 return (EINVAL);
838 data_ptrs[0] = &ccb->nvmeio.data_ptr;
839 lengths[0] = ccb->nvmeio.dxfer_len;
840 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
841 numbufs = 1;
842 break;
843 case XPT_DEV_ADVINFO:
844 if (ccb->cdai.bufsiz == 0)
845 return (0);
846
847 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
848 lengths[0] = ccb->cdai.bufsiz;
849 dirs[0] = CAM_DIR_IN;
850 numbufs = 1;
851
852 /*
853 * This request will not go to the hardware, no reason
854 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
855 */
856 maxmap = MAXPHYS;
857 break;
858 default:
859 return(EINVAL);
860 break; /* NOTREACHED */
861 }
862
863 /*
864 * Check the transfer length and permissions first, so we don't
865 * have to unmap any previously mapped buffers.
866 */
867 for (i = 0; i < numbufs; i++) {
868
869 flags[i] = 0;
870
871 /*
872 * The userland data pointer passed in may not be page
873 * aligned. vmapbuf() truncates the address to a page
874 * boundary, so if the address isn't page aligned, we'll
875 * need enough space for the given transfer length, plus
876 * whatever extra space is necessary to make it to the page
877 * boundary.
878 */
879 if ((lengths[i] +
880 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
881 printf("cam_periph_mapmem: attempt to map %lu bytes, "
882 "which is greater than %lu\n",
883 (long)(lengths[i] +
884 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
885 (u_long)maxmap);
886 return(E2BIG);
887 }
888
889 if (dirs[i] & CAM_DIR_OUT) {
890 flags[i] = BIO_WRITE;
891 }
892
893 if (dirs[i] & CAM_DIR_IN) {
894 flags[i] = BIO_READ;
895 }
896
897 }
898
899 /*
900 * This keeps the kernel stack of the current thread from getting
901 * swapped. In low-memory situations where the kernel stack might
902 * otherwise get swapped out, this holds it and allows the thread
903 * to make progress and release the kernel mapped pages sooner.
904 *
905 * XXX KDM should I use P_NOSWAP instead?
906 */
907 PHOLD(curproc);
908
909 for (i = 0; i < numbufs; i++) {
910 /*
911 * Get the buffer.
912 */
913 mapinfo->bp[i] = getpbuf(NULL);
914
915 /* put our pointer in the data slot */
916 mapinfo->bp[i]->b_data = *data_ptrs[i];
917
918 /* save the user's data address */
919 mapinfo->bp[i]->b_caller1 = *data_ptrs[i];
920
921 /* set the transfer length, we know it's < MAXPHYS */
922 mapinfo->bp[i]->b_bufsize = lengths[i];
923
924 /* set the direction */
925 mapinfo->bp[i]->b_iocmd = flags[i];
926
927 /*
928 * Map the buffer into kernel memory.
929 *
930 * Note that useracc() alone is not a sufficient test.
931 * vmapbuf() can still fail due to a smaller file mapped
932 * into a larger area of VM, or if userland races against
933 * vmapbuf() after the useracc() check.
934 */
935 if (vmapbuf(mapinfo->bp[i], 1) < 0) {
936 for (j = 0; j < i; ++j) {
937 *data_ptrs[j] = mapinfo->bp[j]->b_caller1;
938 vunmapbuf(mapinfo->bp[j]);
939 relpbuf(mapinfo->bp[j], NULL);
940 }
941 relpbuf(mapinfo->bp[i], NULL);
942 PRELE(curproc);
943 return(EACCES);
944 }
945
946 /* set our pointer to the new mapped area */
947 *data_ptrs[i] = mapinfo->bp[i]->b_data;
948
949 mapinfo->num_bufs_used++;
950 }
951
952 /*
953 * Now that we've gotten this far, change ownership to the kernel
954 * of the buffers so that we don't run afoul of returning to user
955 * space with locks (on the buffer) held.
956 */
957 for (i = 0; i < numbufs; i++) {
958 BUF_KERNPROC(mapinfo->bp[i]);
959 }
960
961
962 return(0);
963 }
964
965 /*
966 * Unmap memory segments mapped into kernel virtual address space by
967 * cam_periph_mapmem().
968 */
969 void
970 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
971 {
972 int numbufs, i;
973 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
974
975 if (mapinfo->num_bufs_used <= 0) {
976 /* nothing to free and the process wasn't held. */
977 return;
978 }
979
980 switch (ccb->ccb_h.func_code) {
981 case XPT_DEV_MATCH:
982 numbufs = min(mapinfo->num_bufs_used, 2);
983
984 if (numbufs == 1) {
985 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
986 } else {
987 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
988 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
989 }
990 break;
991 case XPT_SCSI_IO:
992 case XPT_CONT_TARGET_IO:
993 data_ptrs[0] = &ccb->csio.data_ptr;
994 numbufs = min(mapinfo->num_bufs_used, 1);
995 break;
996 case XPT_ATA_IO:
997 data_ptrs[0] = &ccb->ataio.data_ptr;
998 numbufs = min(mapinfo->num_bufs_used, 1);
999 break;
1000 case XPT_SMP_IO:
1001 numbufs = min(mapinfo->num_bufs_used, 2);
1002 data_ptrs[0] = &ccb->smpio.smp_request;
1003 data_ptrs[1] = &ccb->smpio.smp_response;
1004 break;
1005 case XPT_DEV_ADVINFO:
1006 numbufs = min(mapinfo->num_bufs_used, 1);
1007 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1008 break;
1009 case XPT_NVME_IO:
1010 case XPT_NVME_ADMIN:
1011 data_ptrs[0] = &ccb->nvmeio.data_ptr;
1012 numbufs = min(mapinfo->num_bufs_used, 1);
1013 break;
1014 default:
1015 /* allow ourselves to be swapped once again */
1016 PRELE(curproc);
1017 return;
1018 break; /* NOTREACHED */
1019 }
1020
1021 for (i = 0; i < numbufs; i++) {
1022 /* Set the user's pointer back to the original value */
1023 *data_ptrs[i] = mapinfo->bp[i]->b_caller1;
1024
1025 /* unmap the buffer */
1026 vunmapbuf(mapinfo->bp[i]);
1027
1028 /* release the buffer */
1029 relpbuf(mapinfo->bp[i], NULL);
1030 }
1031
1032 /* allow ourselves to be swapped once again */
1033 PRELE(curproc);
1034 }
1035
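/*
 * Example (editorial sketch): a user-I/O path brackets CCB execution
 * with the map/unmap helpers above, in the spirit of the pass(4)
 * driver.  Error handling is abbreviated, the flag choices are
 * illustrative, and cam_periph_runccb() requires the path lock to be
 * held by the caller.  Guarded out since this is not part of the file.
 */
#if 0	/* illustrative only */
static int
xx_run_user_ccb(union ccb *ccb, u_int maxio)
{
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	error = cam_periph_mapmem(ccb, &mapinfo, maxio);
	if (error != 0)
		return (error);
	error = cam_periph_runccb(ccb, /*error_routine*/NULL,
	    /*camflags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA,
	    /*devstat*/NULL);
	cam_periph_unmapmem(ccb, &mapinfo);
	return (error);
}
#endif
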
1036 int
1037 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1038 int (*error_routine)(union ccb *ccb,
1039 cam_flags camflags,
1040 u_int32_t sense_flags))
1041 {
1042 union ccb *ccb;
1043 int error;
1044 int found;
1045
1046 error = found = 0;
1047
1048 switch(cmd){
1049 case CAMGETPASSTHRU:
1050 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1051 xpt_setup_ccb(&ccb->ccb_h,
1052 ccb->ccb_h.path,
1053 CAM_PRIORITY_NORMAL);
1054 ccb->ccb_h.func_code = XPT_GDEVLIST;
1055
1056 /*
1057 * Basically, the point of this is that we go through
1058 * getting the list of devices, until we find a passthrough
1059 * device. In the current version of the CAM code, the
1060 * only way to determine what type of device we're dealing
1061 * with is by its name.
1062 */
1063 while (found == 0) {
1064 ccb->cgdl.index = 0;
1065 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1066 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1067
1068 /* we want the next device in the list */
1069 xpt_action(ccb);
1070 if (strncmp(ccb->cgdl.periph_name,
1071 "pass", 4) == 0){
1072 found = 1;
1073 break;
1074 }
1075 }
1076 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1077 (found == 0)) {
1078 ccb->cgdl.periph_name[0] = '\0';
1079 ccb->cgdl.unit_number = 0;
1080 break;
1081 }
1082 }
1083
1084 /* copy the result back out */
1085 bcopy(ccb, addr, sizeof(union ccb));
1086
1087 /* and release the ccb */
1088 xpt_release_ccb(ccb);
1089
1090 break;
1091 default:
1092 error = ENOTTY;
1093 break;
1094 }
1095 return(error);
1096 }
1097
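/*
 * Example (editorial sketch): a driver without private ioctls can
 * forward to cam_periph_ioctl() under the periph lock, supplying its
 * own error routine.  The xxioctl/xxerror names and the use of
 * si_drv1 for the periph pointer are assumptions; guarded out.
 */
#if 0	/* illustrative only */
static int
xxioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct cam_periph *periph = (struct cam_periph *)dev->si_drv1;
	int error;

	cam_periph_lock(periph);
	error = cam_periph_ioctl(periph, cmd, addr, xxerror);
	cam_periph_unlock(periph);
	return (error);
}
#endif
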
1098 static void
1099 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
1100 {
1101
1102 panic("%s: already done with ccb %p", __func__, done_ccb);
1103 }
1104
1105 static void
1106 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1107 {
1108
1109 /* Caller will release the CCB */
1110 xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
1111 done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
1112 wakeup(&done_ccb->ccb_h.cbfcnp);
1113 }
1114
1115 static void
1116 cam_periph_ccbwait(union ccb *ccb)
1117 {
1118
1119 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
1120 while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
1121 xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
1122 PRIBIO, "cbwait", 0);
1123 }
1124 KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
1125 (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
1126 ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
1127 "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
1128 ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
1129 }
1130
1131 int
1132 cam_periph_runccb(union ccb *ccb,
1133 int (*error_routine)(union ccb *ccb,
1134 cam_flags camflags,
1135 u_int32_t sense_flags),
1136 cam_flags camflags, u_int32_t sense_flags,
1137 struct devstat *ds)
1138 {
1139 struct bintime *starttime;
1140 struct bintime ltime;
1141 int error;
1142
1143 starttime = NULL;
1144 xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1145 KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
1146 ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
1147 ccb->ccb_h.func_code, ccb->ccb_h.flags));
1148
1149 /*
1150 * If the user has supplied a stats structure, and if we understand
1151 * this particular type of ccb, record the transaction start.
1152 */
1153 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1154 ccb->ccb_h.func_code == XPT_ATA_IO)) {
1155 starttime = &ltime;
1156 binuptime(starttime);
1157 devstat_start_transaction(ds, starttime);
1158 }
1159
1160 ccb->ccb_h.cbfcnp = cam_periph_done;
1161 xpt_action(ccb);
1162
1163 do {
1164 cam_periph_ccbwait(ccb);
1165 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1166 error = 0;
1167 else if (error_routine != NULL) {
1168 ccb->ccb_h.cbfcnp = cam_periph_done;
1169 error = (*error_routine)(ccb, camflags, sense_flags);
1170 } else
1171 error = 0;
1172
1173 } while (error == ERESTART);
1174
1175 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1176 cam_release_devq(ccb->ccb_h.path,
1177 /* relsim_flags */0,
1178 /* openings */0,
1179 /* timeout */0,
1180 /* getcount_only */ FALSE);
1181 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1182 }
1183
1184 if (ds != NULL) {
1185 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1186 devstat_end_transaction(ds,
1187 ccb->csio.dxfer_len - ccb->csio.resid,
1188 ccb->csio.tag_action & 0x3,
1189 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1190 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1191 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1192 DEVSTAT_WRITE :
1193 DEVSTAT_READ, NULL, starttime);
1194 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1195 devstat_end_transaction(ds,
1196 ccb->ataio.dxfer_len - ccb->ataio.resid,
1197 0, /* Not used in ATA */
1198 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1199 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1200 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1201 DEVSTAT_WRITE :
1202 DEVSTAT_READ, NULL, starttime);
1203 }
1204 }
1205
1206 return(error);
1207 }
1208
1209 void
1210 cam_freeze_devq(struct cam_path *path)
1211 {
1212 struct ccb_hdr ccb_h;
1213
1214 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1215 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1216 ccb_h.func_code = XPT_NOOP;
1217 ccb_h.flags = CAM_DEV_QFREEZE;
1218 xpt_action((union ccb *)&ccb_h);
1219 }
1220
1221 u_int32_t
1222 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1223 u_int32_t openings, u_int32_t arg,
1224 int getcount_only)
1225 {
1226 struct ccb_relsim crs;
1227
1228 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1229 relsim_flags, openings, arg, getcount_only));
1230 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1231 crs.ccb_h.func_code = XPT_REL_SIMQ;
1232 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1233 crs.release_flags = relsim_flags;
1234 crs.openings = openings;
1235 crs.release_timeout = arg;
1236 xpt_action((union ccb *)&crs);
1237 return (crs.qfrozen_cnt);
1238 }
1239
1240 #define saved_ccb_ptr ppriv_ptr0
1241 static void
1242 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1243 {
1244 union ccb *saved_ccb;
1245 cam_status status;
1246 struct scsi_start_stop_unit *scsi_cmd;
1247 int error_code, sense_key, asc, ascq;
1248
1249 scsi_cmd = (struct scsi_start_stop_unit *)
1250 &done_ccb->csio.cdb_io.cdb_bytes;
1251 status = done_ccb->ccb_h.status;
1252
1253 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1254 if (scsi_extract_sense_ccb(done_ccb,
1255 &error_code, &sense_key, &asc, &ascq)) {
1256 /*
1257 * If the error is "invalid field in CDB",
1258 * and the load/eject flag is set, turn the
1259 * flag off and try again. This is just in
1260 * case the drive in question barfs on the
1261 * load/eject flag. The CAM code should set
1262 * the load/eject flag by default for
1263 * removable media.
1264 */
1265 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1266 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1267 (asc == 0x24) && (ascq == 0x00)) {
1268 scsi_cmd->how &= ~SSS_LOEJ;
1269 if (status & CAM_DEV_QFRZN) {
1270 cam_release_devq(done_ccb->ccb_h.path,
1271 0, 0, 0, 0);
1272 done_ccb->ccb_h.status &=
1273 ~CAM_DEV_QFRZN;
1274 }
1275 xpt_action(done_ccb);
1276 goto out;
1277 }
1278 }
1279 if (cam_periph_error(done_ccb,
1280 0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART)
1281 goto out;
1282 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1283 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1284 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1285 }
1286 } else {
1287 /*
1288 * If we have successfully taken a device from the not
1289 * ready to ready state, re-scan the device and re-get
1290 * the inquiry information. Many devices (mostly disks)
1291 * don't properly report their inquiry information unless
1292 * they are spun up.
1293 */
1294 if (scsi_cmd->opcode == START_STOP_UNIT)
1295 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1296 }
1297
1298 /*
1299 * Perform the final retry with the original CCB so that final
1300 * error processing is performed by the owner of the CCB.
1301 */
1302 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1303 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1304 xpt_free_ccb(saved_ccb);
1305 if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1306 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1307 xpt_action(done_ccb);
1308
1309 out:
1310 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1311 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1312 }
1313
1314 /*
1315 * Generic Async Event handler. Peripheral drivers usually
1316 * filter out the events that require personal attention,
1317 * and leave the rest to this function.
1318 */
1319 void
1320 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1321 struct cam_path *path, void *arg)
1322 {
1323 switch (code) {
1324 case AC_LOST_DEVICE:
1325 cam_periph_invalidate(periph);
1326 break;
1327 default:
1328 break;
1329 }
1330 }
1331
1332 void
1333 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1334 {
1335 struct ccb_getdevstats cgds;
1336
1337 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1338 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1339 xpt_action((union ccb *)&cgds);
1340 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1341 }
1342
1343 void
1344 cam_periph_freeze_after_event(struct cam_periph *periph,
1345 struct timeval* event_time, u_int duration_ms)
1346 {
1347 struct timeval delta;
1348 struct timeval duration_tv;
1349
1350 if (!timevalisset(event_time))
1351 return;
1352
1353 microtime(&delta);
1354 timevalsub(&delta, event_time);
1355 duration_tv.tv_sec = duration_ms / 1000;
1356 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1357 if (timevalcmp(&delta, &duration_tv, <)) {
1358 timevalsub(&duration_tv, &delta);
1359
1360 duration_ms = duration_tv.tv_sec * 1000;
1361 duration_ms += duration_tv.tv_usec / 1000;
1362 cam_freeze_devq(periph->path);
1363 cam_release_devq(periph->path,
1364 RELSIM_RELEASE_AFTER_TIMEOUT,
1365 /*reduction*/0,
1366 /*timeout*/duration_ms,
1367 /*getcount_only*/0);
1368 }
1369
1370 }
1371
1372 static int
1373 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1374 cam_flags camflags, u_int32_t sense_flags,
1375 int *openings, u_int32_t *relsim_flags,
1376 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1377 {
1378 int error;
1379
1380 switch (ccb->csio.scsi_status) {
1381 case SCSI_STATUS_OK:
1382 case SCSI_STATUS_COND_MET:
1383 case SCSI_STATUS_INTERMED:
1384 case SCSI_STATUS_INTERMED_COND_MET:
1385 error = 0;
1386 break;
1387 case SCSI_STATUS_CMD_TERMINATED:
1388 case SCSI_STATUS_CHECK_COND:
1389 error = camperiphscsisenseerror(ccb, orig_ccb,
1390 camflags,
1391 sense_flags,
1392 openings,
1393 relsim_flags,
1394 timeout,
1395 action,
1396 action_string);
1397 break;
1398 case SCSI_STATUS_QUEUE_FULL:
1399 {
1400 /* no decrement */
1401 struct ccb_getdevstats cgds;
1402
1403 /*
1404 * First off, find out what the current
1405 * transaction counts are.
1406 */
1407 xpt_setup_ccb(&cgds.ccb_h,
1408 ccb->ccb_h.path,
1409 CAM_PRIORITY_NORMAL);
1410 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1411 xpt_action((union ccb *)&cgds);
1412
1413 /*
1414 * If we were the only transaction active, treat
1415 * the QUEUE FULL as if it were a BUSY condition.
1416 */
1417 if (cgds.dev_active != 0) {
1418 int total_openings;
1419
1420 /*
1421 * Reduce the number of openings to
1422 * be 1 less than the amount it took
1423 * to get a queue full bounded by the
1424 * minimum allowed tag count for this
1425 * device.
1426 */
1427 total_openings = cgds.dev_active + cgds.dev_openings;
1428 *openings = cgds.dev_active;
1429 if (*openings < cgds.mintags)
1430 *openings = cgds.mintags;
1431 if (*openings < total_openings)
1432 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1433 else {
1434 /*
1435 * Some devices report queue full for
1436 * temporary resource shortages. For
1437 * this reason, we allow a minimum
1438 * tag count to be entered via a
1439 * quirk entry to prevent the queue
1440 * count on these devices from falling
1441 * to a pessimistically low value. We
1442 * still wait for the next successful
1443 * completion, however, before queueing
1444 * more transactions to the device.
1445 */
1446 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1447 }
1448 *timeout = 0;
1449 error = ERESTART;
1450 *action &= ~SSQ_PRINT_SENSE;
1451 break;
1452 }
1453 /* FALLTHROUGH */
1454 }
1455 case SCSI_STATUS_BUSY:
1456 /*
1457 * Restart the queue after either another
1458 * command completes or a 1 second timeout.
1459 */
1460 if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1461 (ccb->ccb_h.retry_count--) > 0) {
1462 error = ERESTART;
1463 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1464 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1465 *timeout = 1000;
1466 } else {
1467 error = EIO;
1468 }
1469 break;
1470 case SCSI_STATUS_RESERV_CONFLICT:
1471 default:
1472 error = EIO;
1473 break;
1474 }
1475 return (error);
1476 }
1477
1478 static int
1479 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1480 cam_flags camflags, u_int32_t sense_flags,
1481 int *openings, u_int32_t *relsim_flags,
1482 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1483 {
1484 struct cam_periph *periph;
1485 union ccb *orig_ccb = ccb;
1486 int error, recoveryccb;
1487
1488 periph = xpt_path_periph(ccb->ccb_h.path);
1489 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1490 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1491 /*
1492 * If error recovery is already in progress, don't attempt
1493 * to process this error, but requeue it unconditionally
1494 * and attempt to process it once error recovery has
1495 * completed. This failed command is probably related to
1496 * the error that caused the currently active error recovery
1497 * action so our current recovery efforts should also
1498 * address this command. Be aware that the error recovery
1499 * code assumes that only one recovery action is in progress
1500 * on a particular peripheral instance at any given time
1501 * (e.g. only one saved CCB for error recovery) so it is
1502 * imperative that we don't violate this assumption.
1503 */
1504 error = ERESTART;
1505 *action &= ~SSQ_PRINT_SENSE;
1506 } else {
1507 scsi_sense_action err_action;
1508 struct ccb_getdev cgd;
1509
1510 /*
1511 * Grab the inquiry data for this device.
1512 */
1513 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1514 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1515 xpt_action((union ccb *)&cgd);
1516
1517 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1518 sense_flags);
1519 error = err_action & SS_ERRMASK;
1520
1521 /*
1522 * Do not autostart sequential access devices
1523 * to avoid unexpected tape loading.
1524 */
1525 if ((err_action & SS_MASK) == SS_START &&
1526 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1527 *action_string = "Will not autostart a "
1528 "sequential access device";
1529 goto sense_error_done;
1530 }
1531
1532 /*
1533 * Avoid recovery recursion if recovery action is the same.
1534 */
1535 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1536 if (((err_action & SS_MASK) == SS_START &&
1537 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1538 ((err_action & SS_MASK) == SS_TUR &&
1539 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1540 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1541 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1542 *timeout = 500;
1543 }
1544 }
1545
1546 /*
1547 * If the recovery action will consume a retry,
1548 * make sure we actually have retries available.
1549 */
1550 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1551 if (ccb->ccb_h.retry_count > 0 &&
1552 (periph->flags & CAM_PERIPH_INVALID) == 0)
1553 ccb->ccb_h.retry_count--;
1554 else {
1555 *action_string = "Retries exhausted";
1556 goto sense_error_done;
1557 }
1558 }
1559
1560 if ((err_action & SS_MASK) >= SS_START) {
1561 /*
1562 * Do common portions of commands that
1563 * use recovery CCBs.
1564 */
1565 orig_ccb = xpt_alloc_ccb_nowait();
1566 if (orig_ccb == NULL) {
1567 *action_string = "Can't allocate recovery CCB";
1568 goto sense_error_done;
1569 }
1570 /*
1571 * Clear freeze flag for original request here, as
1572 * this freeze will be dropped as part of ERESTART.
1573 */
1574 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1575 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1576 }
1577
1578 switch (err_action & SS_MASK) {
1579 case SS_NOP:
1580 *action_string = "No recovery action needed";
1581 error = 0;
1582 break;
1583 case SS_RETRY:
1584 *action_string = "Retrying command (per sense data)";
1585 error = ERESTART;
1586 break;
1587 case SS_FAIL:
1588 *action_string = "Unretryable error";
1589 break;
1590 case SS_START:
1591 {
1592 int le;
1593
1594 /*
1595 * Send a start unit command to the device, and
1596 * then retry the command.
1597 */
1598 *action_string = "Attempting to start unit";
1599 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1600
1601 /*
1602 * Check for removable media and set
1603 * load/eject flag appropriately.
1604 */
1605 if (SID_IS_REMOVABLE(&cgd.inq_data))
1606 le = TRUE;
1607 else
1608 le = FALSE;
1609
1610 scsi_start_stop(&ccb->csio,
1611 /*retries*/1,
1612 camperiphdone,
1613 MSG_SIMPLE_Q_TAG,
1614 /*start*/TRUE,
1615 /*load/eject*/le,
1616 /*immediate*/FALSE,
1617 SSD_FULL_SIZE,
1618 /*timeout*/50000);
1619 break;
1620 }
1621 case SS_TUR:
1622 {
1623 /*
1624 * Send a Test Unit Ready to the device.
1625 * If the 'many' flag is set, we send 120
1626 * test unit ready commands, one every half
1627 * second. Otherwise, we just send one TUR.
1628 * We only want to do this if the retry
1629 * count has not been exhausted.
1630 */
1631 int retries;
1632
1633 if ((err_action & SSQ_MANY) != 0) {
1634 *action_string = "Polling device for readiness";
1635 retries = 120;
1636 } else {
1637 *action_string = "Testing device for readiness";
1638 retries = 1;
1639 }
1640 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1641 scsi_test_unit_ready(&ccb->csio,
1642 retries,
1643 camperiphdone,
1644 MSG_SIMPLE_Q_TAG,
1645 SSD_FULL_SIZE,
1646 /*timeout*/5000);
1647
1648 /*
1649 * Accomplish our 500ms delay by deferring
1650 * the release of our device queue appropriately.
1651 */
1652 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1653 *timeout = 500;
1654 break;
1655 }
1656 default:
1657 panic("Unhandled error action %x", err_action);
1658 }
1659
1660 if ((err_action & SS_MASK) >= SS_START) {
1661 /*
1662 * Drop the priority, so that the recovery
1663 * CCB is the first to execute. Freeze the queue
1664 * after this command is sent so that we can
1665 * restore the old csio and have it queued in
1666 * the proper order before we release normal
1667 * transactions to the device.
1668 */
1669 ccb->ccb_h.pinfo.priority--;
1670 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1671 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1672 error = ERESTART;
1673 *orig = orig_ccb;
1674 }
1675
1676 sense_error_done:
1677 *action = err_action;
1678 }
1679 return (error);
1680 }
1681
1682 /*
1683 * Generic error handler. Peripheral drivers usually filter
1684 * out the errors that they handle in a unique manner, then
1685 * call this function.
1686 */
1687 int
1688 cam_periph_error(union ccb *ccb, cam_flags camflags,
1689 u_int32_t sense_flags, union ccb *save_ccb)
1690 {
1691 struct cam_path *newpath;
1692 union ccb *orig_ccb, *scan_ccb;
1693 struct cam_periph *periph;
1694 const char *action_string;
1695 cam_status status;
1696 int frozen, error, openings, devctl_err;
1697 u_int32_t action, relsim_flags, timeout;
1698
1699 action = SSQ_PRINT_SENSE;
1700 periph = xpt_path_periph(ccb->ccb_h.path);
1701 action_string = NULL;
1702 status = ccb->ccb_h.status;
1703 frozen = (status & CAM_DEV_QFRZN) != 0;
1704 status &= CAM_STATUS_MASK;
1705 devctl_err = openings = relsim_flags = timeout = 0;
1706 orig_ccb = ccb;
1707
1708 /* Filter the errors that should be reported via devctl */
1709 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1710 case CAM_CMD_TIMEOUT:
1711 case CAM_REQ_ABORTED:
1712 case CAM_REQ_CMP_ERR:
1713 case CAM_REQ_TERMIO:
1714 case CAM_UNREC_HBA_ERROR:
1715 case CAM_DATA_RUN_ERR:
1716 case CAM_SCSI_STATUS_ERROR:
1717 case CAM_ATA_STATUS_ERROR:
1718 case CAM_SMP_STATUS_ERROR:
1719 devctl_err++;
1720 break;
1721 default:
1722 break;
1723 }
1724
1725 switch (status) {
1726 case CAM_REQ_CMP:
1727 error = 0;
1728 action &= ~SSQ_PRINT_SENSE;
1729 break;
1730 case CAM_SCSI_STATUS_ERROR:
1731 error = camperiphscsistatuserror(ccb, &orig_ccb,
1732 camflags, sense_flags, &openings, &relsim_flags,
1733 &timeout, &action, &action_string);
1734 break;
1735 case CAM_AUTOSENSE_FAIL:
1736 error = EIO; /* we have to kill the command */
1737 break;
1738 case CAM_UA_ABORT:
1739 case CAM_UA_TERMIO:
1740 case CAM_MSG_REJECT_REC:
1741 /* XXX Don't know that these are correct */
1742 error = EIO;
1743 break;
1744 case CAM_SEL_TIMEOUT:
1745 if ((camflags & CAM_RETRY_SELTO) != 0) {
1746 if (ccb->ccb_h.retry_count > 0 &&
1747 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1748 ccb->ccb_h.retry_count--;
1749 error = ERESTART;
1750
1751 /*
1752 * Wait a bit to give the device
1753 * time to recover before we try again.
1754 */
1755 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1756 timeout = periph_selto_delay;
1757 break;
1758 }
1759 action_string = "Retries exhausted";
1760 }
1761 /* FALLTHROUGH */
1762 case CAM_DEV_NOT_THERE:
1763 error = ENXIO;
1764 action = SSQ_LOST;
1765 break;
1766 case CAM_REQ_INVALID:
1767 case CAM_PATH_INVALID:
1768 case CAM_NO_HBA:
1769 case CAM_PROVIDE_FAIL:
1770 case CAM_REQ_TOO_BIG:
1771 case CAM_LUN_INVALID:
1772 case CAM_TID_INVALID:
1773 case CAM_FUNC_NOTAVAIL:
1774 error = EINVAL;
1775 break;
1776 case CAM_SCSI_BUS_RESET:
1777 case CAM_BDR_SENT:
1778 /*
1779 * Commands that repeatedly timeout and cause these
1780 * kinds of error recovery actions, should return
1781 * CAM_CMD_TIMEOUT, which allows us to safely assume
1782 * that this command was an innocent bystander to
1783 * these events and should be unconditionally
1784 * retried.
1785 */
1786 case CAM_REQUEUE_REQ:
1787 /* Unconditional requeue if device is still there */
1788 if (periph->flags & CAM_PERIPH_INVALID) {
1789 action_string = "Periph was invalidated";
1790 error = EIO;
1791 } else if (sense_flags & SF_NO_RETRY) {
1792 error = EIO;
1793 action_string = "Retry was blocked";
1794 } else {
1795 error = ERESTART;
1796 action &= ~SSQ_PRINT_SENSE;
1797 }
1798 break;
1799 case CAM_RESRC_UNAVAIL:
1800 /* Wait a bit for the resource shortage to abate. */
1801 timeout = periph_noresrc_delay;
1802 /* FALLTHROUGH */
1803 case CAM_BUSY:
1804 if (timeout == 0) {
1805 /* Wait a bit for the busy condition to abate. */
1806 timeout = periph_busy_delay;
1807 }
1808 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1809 /* FALLTHROUGH */
1810 case CAM_ATA_STATUS_ERROR:
1811 case CAM_REQ_CMP_ERR:
1812 case CAM_CMD_TIMEOUT:
1813 case CAM_UNEXP_BUSFREE:
1814 case CAM_UNCOR_PARITY:
1815 case CAM_DATA_RUN_ERR:
1816 default:
1817 if (periph->flags & CAM_PERIPH_INVALID) {
1818 error = EIO;
1819 action_string = "Periph was invalidated";
1820 } else if (ccb->ccb_h.retry_count == 0) {
1821 error = EIO;
1822 action_string = "Retries exhausted";
1823 } else if (sense_flags & SF_NO_RETRY) {
1824 error = EIO;
1825 action_string = "Retry was blocked";
1826 } else {
1827 ccb->ccb_h.retry_count--;
1828 error = ERESTART;
1829 }
1830 break;
1831 }
1832
1833 if ((sense_flags & SF_PRINT_ALWAYS) ||
1834 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1835 action |= SSQ_PRINT_SENSE;
1836 else if (sense_flags & SF_NO_PRINT)
1837 action &= ~SSQ_PRINT_SENSE;
1838 if ((action & SSQ_PRINT_SENSE) != 0)
1839 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1840 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
1841 if (error != ERESTART) {
1842 if (action_string == NULL)
1843 action_string = "Unretryable error";
1844 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1845 error, action_string);
1846 } else if (action_string != NULL)
1847 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1848 else
1849 xpt_print(ccb->ccb_h.path, "Retrying command\n");
1850 }
1851
1852 if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
1853 cam_periph_devctl_notify(orig_ccb);
1854
1855 if ((action & SSQ_LOST) != 0) {
1856 lun_id_t lun_id;
1857
1858 /*
1859 * For a selection timeout, we consider all of the LUNs on
1860 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
1861 * then we only get rid of the device(s) specified by the
1862 * path in the original CCB.
1863 */
1864 if (status == CAM_SEL_TIMEOUT)
1865 lun_id = CAM_LUN_WILDCARD;
1866 else
1867 lun_id = xpt_path_lun_id(ccb->ccb_h.path);
1868
1869 /* Should we do more if we can't create the path?? */
1870 if (xpt_create_path(&newpath, periph,
1871 xpt_path_path_id(ccb->ccb_h.path),
1872 xpt_path_target_id(ccb->ccb_h.path),
1873 lun_id) == CAM_REQ_CMP) {
1874
1875 /*
1876 * Let peripheral drivers know that this
1877 * device has gone away.
1878 */
1879 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1880 xpt_free_path(newpath);
1881 }
1882 }
1883
1884 /* Broadcast UNIT ATTENTIONs to all periphs. */
1885 if ((action & SSQ_UA) != 0)
1886 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
1887
1888 /* Rescan target on "Reported LUNs data has changed" */
1889 if ((action & SSQ_RESCAN) != 0) {
1890 if (xpt_create_path(&newpath, NULL,
1891 xpt_path_path_id(ccb->ccb_h.path),
1892 xpt_path_target_id(ccb->ccb_h.path),
1893 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
1894
1895 scan_ccb = xpt_alloc_ccb_nowait();
1896 if (scan_ccb != NULL) {
1897 scan_ccb->ccb_h.path = newpath;
1898 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
1899 scan_ccb->crcn.flags = 0;
1900 xpt_rescan(scan_ccb);
1901 } else {
1902 xpt_print(newpath,
1903 "Can't allocate CCB to rescan target\n");
1904 xpt_free_path(newpath);
1905 }
1906 }
1907 }
1908
1909 /* Attempt a retry */
1910 if (error == ERESTART || error == 0) {
1911 if (frozen != 0)
1912 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1913 if (error == ERESTART)
1914 xpt_action(ccb);
1915 if (frozen != 0)
1916 cam_release_devq(ccb->ccb_h.path,
1917 relsim_flags,
1918 openings,
1919 timeout,
1920 /*getcount_only*/0);
1921 }
1922
1923 return (error);
1924 }
1925
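/*
 * Example (editorial sketch): a peripheral's error callback typically
 * screens for driver-specific conditions and then defers to
 * cam_periph_error() above for the generic handling (hypothetical
 * xxerror, guarded out):
 */
#if 0	/* illustrative only */
static int
xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	/* Driver-specific screening would go here. */
	return (cam_periph_error(ccb, cam_flags, sense_flags,
	    /*save_ccb*/NULL));
}
#endif
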
1926 #define CAM_PERIPH_DEVD_MSG_SIZE 256
1927
1928 static void
1929 cam_periph_devctl_notify(union ccb *ccb)
1930 {
1931 struct cam_periph *periph;
1932 struct ccb_getdev *cgd;
1933 struct sbuf sb;
1934 int serr, sk, asc, ascq;
1935 char *sbmsg, *type;
1936
1937 sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
1938 if (sbmsg == NULL)
1939 return;
1940
1941 sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
1942
1943 periph = xpt_path_periph(ccb->ccb_h.path);
1944 sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
1945 periph->unit_number);
1946
1947 sbuf_printf(&sb, "serial=\"");
1948 if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
1949 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
1950 CAM_PRIORITY_NORMAL);
1951 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1952 xpt_action((union ccb *)cgd);
1953
1954 if (cgd->ccb_h.status == CAM_REQ_CMP)
1955 sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
1956 xpt_free_ccb((union ccb *)cgd);
1957 }
1958 sbuf_printf(&sb, "\" ");
1959 sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
1960
1961 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1962 case CAM_CMD_TIMEOUT:
1963 sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
1964 type = "timeout";
1965 break;
1966 case CAM_SCSI_STATUS_ERROR:
1967 sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
1968 if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
1969 sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
1970 serr, sk, asc, ascq);
1971 type = "error";
1972 break;
1973 case CAM_ATA_STATUS_ERROR:
1974 sbuf_printf(&sb, "RES=\"");
1975 ata_res_sbuf(&ccb->ataio.res, &sb);
1976 sbuf_printf(&sb, "\" ");
1977 type = "error";
1978 break;
1979 default:
1980 type = "error";
1981 break;
1982 }
1983
1984 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1985 sbuf_printf(&sb, "CDB=\"");
1986 scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
1987 sbuf_printf(&sb, "\" ");
1988 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1989 sbuf_printf(&sb, "ACB=\"");
1990 ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
1991 sbuf_printf(&sb, "\" ");
1992 }
1993
1994 if (sbuf_finish(&sb) == 0)
1995 devctl_notify("CAM", "periph", type, sbuf_data(&sb));
1996 sbuf_delete(&sb);
1997 free(sbmsg, M_CAMPERIPH);
1998 }
1999