FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_periph.c
1 /*-
2 * Common functions for CAM "type" (peripheral) drivers.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification, immediately at the beginning of the file.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/types.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #include <sys/conf.h>
42 #include <sys/devctl.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/buf.h>
46 #include <sys/proc.h>
47 #include <sys/devicestat.h>
48 #include <sys/sbuf.h>
49 #include <sys/sysctl.h>
50 #include <vm/vm.h>
51 #include <vm/vm_extern.h>
52
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_queue.h>
56 #include <cam/cam_xpt_periph.h>
57 #include <cam/cam_periph.h>
58 #include <cam/cam_debug.h>
59 #include <cam/cam_sim.h>
60
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 #include <cam/scsi/scsi_pass.h>
64
65 static u_int camperiphnextunit(struct periph_driver *p_drv,
66 u_int newunit, int wired,
67 path_id_t pathid, target_id_t target,
68 lun_id_t lun);
69 static u_int camperiphunit(struct periph_driver *p_drv,
70 path_id_t pathid, target_id_t target,
71 lun_id_t lun);
72 static void camperiphdone(struct cam_periph *periph,
73 union ccb *done_ccb);
74 static void camperiphfree(struct cam_periph *periph);
75 static int camperiphscsistatuserror(union ccb *ccb,
76 union ccb **orig_ccb,
77 cam_flags camflags,
78 u_int32_t sense_flags,
79 int *openings,
80 u_int32_t *relsim_flags,
81 u_int32_t *timeout,
82 u_int32_t *action,
83 const char **action_string);
84 static int camperiphscsisenseerror(union ccb *ccb,
85 union ccb **orig_ccb,
86 cam_flags camflags,
87 u_int32_t sense_flags,
88 int *openings,
89 u_int32_t *relsim_flags,
90 u_int32_t *timeout,
91 u_int32_t *action,
92 const char **action_string);
93 static void cam_periph_devctl_notify(union ccb *ccb);
94
95 static int nperiph_drivers;
96 static int initialized = 0;
97 struct periph_driver **periph_drivers;
98
99 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
100
101 static int periph_selto_delay = 1000;
102 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
103 static int periph_noresrc_delay = 500;
104 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
105 static int periph_busy_delay = 500;
106 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
107
108 static u_int periph_mapmem_thresh = 65536;
109 SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
110 &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
111
112 void
113 periphdriver_register(void *data)
114 {
115 struct periph_driver *drv = (struct periph_driver *)data;
116 struct periph_driver **newdrivers, **old;
117 int ndrivers;
118
119 again:
120 ndrivers = nperiph_drivers + 2;
121 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
122 M_WAITOK);
123 xpt_lock_buses();
124 if (ndrivers != nperiph_drivers + 2) {
125 /*
126 * Lost race against itself; go around.
127 */
128 xpt_unlock_buses();
129 free(newdrivers, M_CAMPERIPH);
130 goto again;
131 }
132 if (periph_drivers)
133 bcopy(periph_drivers, newdrivers,
134 sizeof(*newdrivers) * nperiph_drivers);
135 newdrivers[nperiph_drivers] = drv;
136 newdrivers[nperiph_drivers + 1] = NULL;
137 old = periph_drivers;
138 periph_drivers = newdrivers;
139 nperiph_drivers++;
140 xpt_unlock_buses();
141 if (old)
142 free(old, M_CAMPERIPH);
143 /* If driver marked as early or it is late now, initialize it. */
144 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
145 initialized > 1)
146 (*drv->init)();
147 }
148
149 int
150 periphdriver_unregister(void *data)
151 {
152 struct periph_driver *drv = (struct periph_driver *)data;
153 int error, n;
154
155 /* If driver marked as early or it is late now, deinitialize it. */
156 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
157 initialized > 1) {
158 if (drv->deinit == NULL) {
159 printf("CAM periph driver '%s' doesn't have deinit.\n",
160 drv->driver_name);
161 return (EOPNOTSUPP);
162 }
163 error = drv->deinit();
164 if (error != 0)
165 return (error);
166 }
167
168 xpt_lock_buses();
169 for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
170 ;
171 KASSERT(n < nperiph_drivers,
172 ("Periph driver '%s' was not registered", drv->driver_name));
173 for (; n + 1 < nperiph_drivers; n++)
174 periph_drivers[n] = periph_drivers[n + 1];
175 periph_drivers[n + 1] = NULL;
176 nperiph_drivers--;
177 xpt_unlock_buses();
178 return (0);
179 }
180
181 void
182 periphdriver_init(int level)
183 {
184 int i, early;
185
186 initialized = max(initialized, level);
187 for (i = 0; periph_drivers[i] != NULL; i++) {
188 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
189 if (early == initialized)
190 (*periph_drivers[i]->init)();
191 }
192 }
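
/*
 * Drivers normally register through the PERIPHDRIVER_DECLARE() macro from
 * cam_periph.h, which arranges for periphdriver_register() to run at boot.
 * A minimal sketch of the usual pattern (hypothetical "xx" driver, modeled
 * on da(4) and friends; not part of this file):
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0, 0
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 */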
193
194 cam_status
195 cam_periph_alloc(periph_ctor_t *periph_ctor,
196 periph_oninv_t *periph_oninvalidate,
197 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
198 char *name, cam_periph_type type, struct cam_path *path,
199 ac_callback_t *ac_callback, ac_code code, void *arg)
200 {
201 struct periph_driver **p_drv;
202 struct cam_sim *sim;
203 struct cam_periph *periph;
204 struct cam_periph *cur_periph;
205 path_id_t path_id;
206 target_id_t target_id;
207 lun_id_t lun_id;
208 cam_status status;
209 u_int init_level;
210
211 init_level = 0;
212 /*
213 * Handle Hot-Plug scenarios. If there is already a peripheral
214 * of our type assigned to this path, we are likely waiting for
215 * final close on an old, invalidated, peripheral. If this is
216 * the case, queue up a deferred call to the peripheral's async
217 * handler. If it looks like a mistaken re-allocation, complain.
218 */
219 if ((periph = cam_periph_find(path, name)) != NULL) {
220 if ((periph->flags & CAM_PERIPH_INVALID) != 0
221 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
222 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
223 periph->deferred_callback = ac_callback;
224 periph->deferred_ac = code;
225 return (CAM_REQ_INPROG);
226 } else {
227 printf("cam_periph_alloc: attempt to re-allocate "
228 "valid device %s%d rejected flags %#x "
229 "refcount %d\n", periph->periph_name,
230 periph->unit_number, periph->flags,
231 periph->refcount);
232 }
233 return (CAM_REQ_INVALID);
234 }
235
236 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
237 M_NOWAIT|M_ZERO);
238
239 if (periph == NULL)
240 return (CAM_RESRC_UNAVAIL);
241
242 init_level++;
243
244 sim = xpt_path_sim(path);
245 path_id = xpt_path_path_id(path);
246 target_id = xpt_path_target_id(path);
247 lun_id = xpt_path_lun_id(path);
248 periph->periph_start = periph_start;
249 periph->periph_dtor = periph_dtor;
250 periph->periph_oninval = periph_oninvalidate;
251 periph->type = type;
252 periph->periph_name = name;
253 periph->scheduled_priority = CAM_PRIORITY_NONE;
254 periph->immediate_priority = CAM_PRIORITY_NONE;
255 periph->refcount = 1; /* Dropped by invalidation. */
256 periph->sim = sim;
257 SLIST_INIT(&periph->ccb_list);
258 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
259 if (status != CAM_REQ_CMP)
260 goto failure;
261 periph->path = path;
262
263 xpt_lock_buses();
264 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
265 if (strcmp((*p_drv)->driver_name, name) == 0)
266 break;
267 }
268 if (*p_drv == NULL) {
269 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
270 xpt_unlock_buses();
271 xpt_free_path(periph->path);
272 free(periph, M_CAMPERIPH);
273 return (CAM_REQ_INVALID);
274 }
275 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
276 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
277 while (cur_periph != NULL
278 && cur_periph->unit_number < periph->unit_number)
279 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
280 if (cur_periph != NULL) {
281 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
282 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
283 } else {
284 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
285 (*p_drv)->generation++;
286 }
287 xpt_unlock_buses();
288
289 init_level++;
290
291 status = xpt_add_periph(periph);
292 if (status != CAM_REQ_CMP)
293 goto failure;
294
295 init_level++;
296 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
297
298 status = periph_ctor(periph, arg);
299
300 if (status == CAM_REQ_CMP)
301 init_level++;
302
303 failure:
304 switch (init_level) {
305 case 4:
306 /* Initialized successfully */
307 break;
308 case 3:
309 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
310 xpt_remove_periph(periph);
311 /* FALLTHROUGH */
312 case 2:
313 xpt_lock_buses();
314 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
315 xpt_unlock_buses();
316 xpt_free_path(periph->path);
317 /* FALLTHROUGH */
318 case 1:
319 free(periph, M_CAMPERIPH);
320 /* FALLTHROUGH */
321 case 0:
322 /* No cleanup to perform. */
323 break;
324 default:
325 panic("%s: Unknown init level", __func__);
326 }
327 return(status);
328 }
329
330 /*
331 * Find a peripheral structure with the specified path, target, lun,
332 * and (optionally) type. If the name is NULL, this function will return
333 * the first peripheral driver that matches the specified path.
334 */
335 struct cam_periph *
336 cam_periph_find(struct cam_path *path, char *name)
337 {
338 struct periph_driver **p_drv;
339 struct cam_periph *periph;
340
341 xpt_lock_buses();
342 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
343 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
344 continue;
345
346 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
347 if (xpt_path_comp(periph->path, path) == 0) {
348 xpt_unlock_buses();
349 cam_periph_assert(periph, MA_OWNED);
350 return(periph);
351 }
352 }
353 if (name != NULL) {
354 xpt_unlock_buses();
355 return(NULL);
356 }
357 }
358 xpt_unlock_buses();
359 return(NULL);
360 }
361
362 /*
363 * Find peripheral driver instances attached to the specified path.
364 */
365 int
366 cam_periph_list(struct cam_path *path, struct sbuf *sb)
367 {
368 struct sbuf local_sb;
369 struct periph_driver **p_drv;
370 struct cam_periph *periph;
371 int count;
372 int sbuf_alloc_len;
373
374 sbuf_alloc_len = 16;
375 retry:
376 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
377 count = 0;
378 xpt_lock_buses();
379 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
380 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
381 if (xpt_path_comp(periph->path, path) != 0)
382 continue;
383
384 if (sbuf_len(&local_sb) != 0)
385 sbuf_cat(&local_sb, ",");
386
387 sbuf_printf(&local_sb, "%s%d", periph->periph_name,
388 periph->unit_number);
389
390 if (sbuf_error(&local_sb) == ENOMEM) {
391 sbuf_alloc_len *= 2;
392 xpt_unlock_buses();
393 sbuf_delete(&local_sb);
394 goto retry;
395 }
396 count++;
397 }
398 }
399 xpt_unlock_buses();
400 sbuf_finish(&local_sb);
401 if (sbuf_len(sb) != 0)
402 sbuf_cat(sb, ",");
403 sbuf_cat(sb, sbuf_data(&local_sb));
404 sbuf_delete(&local_sb);
405 return (count);
406 }
407
408 int
409 cam_periph_acquire(struct cam_periph *periph)
410 {
411 int status;
412
413 if (periph == NULL)
414 return (EINVAL);
415
416 status = ENOENT;
417 xpt_lock_buses();
418 if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
419 periph->refcount++;
420 status = 0;
421 }
422 xpt_unlock_buses();
423
424 return (status);
425 }
426
427 void
428 cam_periph_doacquire(struct cam_periph *periph)
429 {
430
431 xpt_lock_buses();
432 KASSERT(periph->refcount >= 1,
433 ("cam_periph_doacquire() with refcount == %d", periph->refcount));
434 periph->refcount++;
435 xpt_unlock_buses();
436 }
437
438 void
439 cam_periph_release_locked_buses(struct cam_periph *periph)
440 {
441
442 cam_periph_assert(periph, MA_OWNED);
443 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
444 if (--periph->refcount == 0)
445 camperiphfree(periph);
446 }
447
448 void
449 cam_periph_release_locked(struct cam_periph *periph)
450 {
451
452 if (periph == NULL)
453 return;
454
455 xpt_lock_buses();
456 cam_periph_release_locked_buses(periph);
457 xpt_unlock_buses();
458 }
459
460 void
461 cam_periph_release(struct cam_periph *periph)
462 {
463 struct mtx *mtx;
464
465 if (periph == NULL)
466 return;
467
468 cam_periph_assert(periph, MA_NOTOWNED);
469 mtx = cam_periph_mtx(periph);
470 mtx_lock(mtx);
471 cam_periph_release_locked(periph);
472 mtx_unlock(mtx);
473 }
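
/*
 * The acquire/release pair guards asynchronous references to a periph.
 * For instance, code that schedules deferred work takes a reference first
 * and drops it when the work completes.  A sketch (hypothetical task, in
 * the style of the da(4) sysctl task; not part of this file):
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return;			(periph is being invalidated)
 *	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
 *
 * and later, from the task function itself:
 *
 *	cam_periph_release(periph);
 */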
474
475 /*
476 * hold/unhold act as mutual exclusion for sections of the code that
477 * need to sleep and want to make sure that other sections that
478 * will interfere are held off. This only protects exclusive sections
479 * from each other.
480 */
481 int
482 cam_periph_hold(struct cam_periph *periph, int priority)
483 {
484 int error;
485
486 /*
487 * Increment the reference count on the peripheral
488 * while we wait for our lock attempt to succeed
489 * to ensure the peripheral doesn't disappear out
 490  * from under us while we sleep.
491 */
492
493 if (cam_periph_acquire(periph) != 0)
494 return (ENXIO);
495
496 cam_periph_assert(periph, MA_OWNED);
497 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
498 periph->flags |= CAM_PERIPH_LOCK_WANTED;
499 if ((error = cam_periph_sleep(periph, periph, priority,
500 "caplck", 0)) != 0) {
501 cam_periph_release_locked(periph);
502 return (error);
503 }
504 if (periph->flags & CAM_PERIPH_INVALID) {
505 cam_periph_release_locked(periph);
506 return (ENXIO);
507 }
508 }
509
510 periph->flags |= CAM_PERIPH_LOCKED;
511 return (0);
512 }
513
514 void
515 cam_periph_unhold(struct cam_periph *periph)
516 {
517
518 cam_periph_assert(periph, MA_OWNED);
519
520 periph->flags &= ~CAM_PERIPH_LOCKED;
521 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
522 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
523 wakeup(periph);
524 }
525
526 cam_periph_release_locked(periph);
527 }
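
/*
 * A typical holder is a peripheral driver's open routine, which must sleep
 * during setup while excluding other exclusive sections.  A condensed
 * sketch of that pattern (hypothetical xxopen(), after daopen(); error
 * handling and device setup trimmed):
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		cam_periph_release(periph);
 *		return (error);
 *	}
 *	... sleepable, exclusive setup here ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */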
528
529 /*
530 * Look for the next unit number that is not currently in use for this
531 * peripheral type starting at "newunit". Also exclude unit numbers that
 532  * are reserved for future "hardwiring" unless we already know that this
533 * is a potential wired device. Only assume that the device is "wired" the
534 * first time through the loop since after that we'll be looking at unit
535 * numbers that did not match a wiring entry.
536 */
537 static u_int
538 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
539 path_id_t pathid, target_id_t target, lun_id_t lun)
540 {
541 struct cam_periph *periph;
542 char *periph_name;
543 int i, val, dunit, r;
544 const char *dname, *strval;
545
546 periph_name = p_drv->driver_name;
547 for (;;newunit++) {
548 for (periph = TAILQ_FIRST(&p_drv->units);
549 periph != NULL && periph->unit_number != newunit;
550 periph = TAILQ_NEXT(periph, unit_links))
551 ;
552
553 if (periph != NULL && periph->unit_number == newunit) {
554 if (wired != 0) {
555 xpt_print(periph->path, "Duplicate Wired "
556 "Device entry!\n");
557 xpt_print(periph->path, "Second device (%s "
558 "device at scbus%d target %d lun %d) will "
559 "not be wired\n", periph_name, pathid,
560 target, lun);
561 wired = 0;
562 }
563 continue;
564 }
565 if (wired)
566 break;
567
568 /*
569 * Don't match entries like "da 4" as a wired down
570 * device, but do match entries like "da 4 target 5"
571 * or even "da 4 scbus 1".
572 */
573 i = 0;
574 dname = periph_name;
575 for (;;) {
576 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
577 if (r != 0)
578 break;
579 /* if no "target" and no specific scbus, skip */
580 if (resource_int_value(dname, dunit, "target", &val) &&
581 (resource_string_value(dname, dunit, "at",&strval)||
582 strcmp(strval, "scbus") == 0))
583 continue;
584 if (newunit == dunit)
585 break;
586 }
587 if (r != 0)
588 break;
589 }
590 return (newunit);
591 }
592
593 static u_int
594 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
595 target_id_t target, lun_id_t lun)
596 {
597 u_int unit;
598 int wired, i, val, dunit;
599 const char *dname, *strval;
600 char pathbuf[32], *periph_name;
601
602 periph_name = p_drv->driver_name;
603 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
604 unit = 0;
605 i = 0;
606 dname = periph_name;
607 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
608 wired = 0) {
609 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
610 if (strcmp(strval, pathbuf) != 0)
611 continue;
612 wired++;
613 }
614 if (resource_int_value(dname, dunit, "target", &val) == 0) {
615 if (val != target)
616 continue;
617 wired++;
618 }
619 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
620 if (val != lun)
621 continue;
622 wired++;
623 }
624 if (wired != 0) {
625 unit = dunit;
626 break;
627 }
628 }
629
630 /*
631 * Either start from 0 looking for the next unit or from
632 * the unit number given in the resource config. This way,
633 * if we have wildcard matches, we don't return the same
634 * unit number twice.
635 */
636 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
637
638 return (unit);
639 }
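
/*
 * Wiring entries come from the hints namespace consulted by the
 * resource_*_value() calls above.  For example (a sketch of the
 * /boot/device.hints syntax; the bus/target/lun values are illustrative):
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * reserves unit da4 for the device at scbus1 target 5 lun 0, and
 * camperiphnextunit() will skip unit 4 when numbering non-wired devices.
 */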
640
641 void
642 cam_periph_invalidate(struct cam_periph *periph)
643 {
644
645 cam_periph_assert(periph, MA_OWNED);
646 /*
647 * We only tear down the device the first time a peripheral is
648 * invalidated.
649 */
650 if ((periph->flags & CAM_PERIPH_INVALID) != 0)
651 return;
652
653 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
654 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
655 struct sbuf sb;
656 char buffer[160];
657
658 sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
659 xpt_denounce_periph_sbuf(periph, &sb);
660 sbuf_finish(&sb);
661 sbuf_putbuf(&sb);
662 }
663 periph->flags |= CAM_PERIPH_INVALID;
664 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
665 if (periph->periph_oninval != NULL)
666 periph->periph_oninval(periph);
667 cam_periph_release_locked(periph);
668 }
669
670 static void
671 camperiphfree(struct cam_periph *periph)
672 {
673 struct periph_driver **p_drv;
674 struct periph_driver *drv;
675
676 cam_periph_assert(periph, MA_OWNED);
677 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
678 periph->periph_name, periph->unit_number));
679 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
680 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
681 break;
682 }
683 if (*p_drv == NULL) {
684 printf("camperiphfree: attempt to free non-existant periph\n");
685 return;
686 }
687 /*
688 * Cache a pointer to the periph_driver structure. If a
689 * periph_driver is added or removed from the array (see
 690 	 * periphdriver_register()) while we drop the topology lock
691 * below, p_drv may change. This doesn't protect against this
692 * particular periph_driver going away. That will require full
693 * reference counting in the periph_driver infrastructure.
694 */
695 drv = *p_drv;
696
697 /*
698 * We need to set this flag before dropping the topology lock, to
 699 	 * let anyone who is traversing the list know that this peripheral is
700 * about to be freed, and there will be no more reference count
701 * checks.
702 */
703 periph->flags |= CAM_PERIPH_FREE;
704
705 /*
706 * The peripheral destructor semantics dictate calling with only the
707 * SIM mutex held. Since it might sleep, it should not be called
708 * with the topology lock held.
709 */
710 xpt_unlock_buses();
711
712 /*
713 * We need to call the peripheral destructor prior to removing the
714 * peripheral from the list. Otherwise, we risk running into a
715 * scenario where the peripheral unit number may get reused
716 * (because it has been removed from the list), but some resources
717 * used by the peripheral are still hanging around. In particular,
718 * the devfs nodes used by some peripherals like the pass(4) driver
719 * aren't fully cleaned up until the destructor is run. If the
720 * unit number is reused before the devfs instance is fully gone,
721 * devfs will panic.
722 */
723 if (periph->periph_dtor != NULL)
724 periph->periph_dtor(periph);
725
726 /*
727 * The peripheral list is protected by the topology lock. We have to
728 * remove the periph from the drv list before we call deferred_ac. The
729 * AC_FOUND_DEVICE callback won't create a new periph if it's still there.
730 */
731 xpt_lock_buses();
732
733 TAILQ_REMOVE(&drv->units, periph, unit_links);
734 drv->generation++;
735
736 xpt_remove_periph(periph);
737
738 xpt_unlock_buses();
739 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
740 xpt_print(periph->path, "Periph destroyed\n");
741 else
742 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
743
744 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
745 union ccb ccb;
746 void *arg;
747
748 switch (periph->deferred_ac) {
749 case AC_FOUND_DEVICE:
750 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
751 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
752 xpt_action(&ccb);
753 arg = &ccb;
754 break;
755 case AC_PATH_REGISTERED:
756 xpt_path_inq(&ccb.cpi, periph->path);
757 arg = &ccb;
758 break;
759 default:
760 arg = NULL;
761 break;
762 }
763 periph->deferred_callback(NULL, periph->deferred_ac,
764 periph->path, arg);
765 }
766 xpt_free_path(periph->path);
767 free(periph, M_CAMPERIPH);
768 xpt_lock_buses();
769 }
770
771 /*
772 * Map user virtual pointers into kernel virtual address space, so we can
773 * access the memory. This is now a generic function that centralizes most
774 * of the sanity checks on the data flags, if any.
775 * This also only works for up to maxphys memory. Since we use
776 * buffers to map stuff in and out, we're limited to the buffer size.
777 */
778 int
779 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
780 u_int maxmap)
781 {
782 int numbufs, i;
783 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
784 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
785 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
786
787 bzero(mapinfo, sizeof(*mapinfo));
788 if (maxmap == 0)
789 maxmap = DFLTPHYS; /* traditional default */
790 else if (maxmap > maxphys)
791 maxmap = maxphys; /* for safety */
792 switch(ccb->ccb_h.func_code) {
793 case XPT_DEV_MATCH:
794 if (ccb->cdm.match_buf_len == 0) {
795 printf("cam_periph_mapmem: invalid match buffer "
796 "length 0\n");
797 return(EINVAL);
798 }
799 if (ccb->cdm.pattern_buf_len > 0) {
800 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
801 lengths[0] = ccb->cdm.pattern_buf_len;
802 dirs[0] = CAM_DIR_OUT;
803 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
804 lengths[1] = ccb->cdm.match_buf_len;
805 dirs[1] = CAM_DIR_IN;
806 numbufs = 2;
807 } else {
808 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
809 lengths[0] = ccb->cdm.match_buf_len;
810 dirs[0] = CAM_DIR_IN;
811 numbufs = 1;
812 }
813 /*
814 * This request will not go to the hardware, no reason
815 * to be so strict. vmapbuf() is able to map up to maxphys.
816 */
817 maxmap = maxphys;
818 break;
819 case XPT_SCSI_IO:
820 case XPT_CONT_TARGET_IO:
821 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
822 return(0);
823 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
824 return (EINVAL);
825 data_ptrs[0] = &ccb->csio.data_ptr;
826 lengths[0] = ccb->csio.dxfer_len;
827 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
828 numbufs = 1;
829 break;
830 case XPT_ATA_IO:
831 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
832 return(0);
833 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
834 return (EINVAL);
835 data_ptrs[0] = &ccb->ataio.data_ptr;
836 lengths[0] = ccb->ataio.dxfer_len;
837 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
838 numbufs = 1;
839 break;
840 case XPT_MMC_IO:
841 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
842 return(0);
843 /* Two mappings: one for cmd->data and one for cmd->data->data */
844 data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
845 lengths[0] = sizeof(struct mmc_data *);
846 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
847 data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
848 lengths[1] = ccb->mmcio.cmd.data->len;
849 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
850 numbufs = 2;
851 break;
852 case XPT_SMP_IO:
853 data_ptrs[0] = &ccb->smpio.smp_request;
854 lengths[0] = ccb->smpio.smp_request_len;
855 dirs[0] = CAM_DIR_OUT;
856 data_ptrs[1] = &ccb->smpio.smp_response;
857 lengths[1] = ccb->smpio.smp_response_len;
858 dirs[1] = CAM_DIR_IN;
859 numbufs = 2;
860 break;
861 case XPT_NVME_IO:
862 case XPT_NVME_ADMIN:
863 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
864 return (0);
865 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
866 return (EINVAL);
867 data_ptrs[0] = &ccb->nvmeio.data_ptr;
868 lengths[0] = ccb->nvmeio.dxfer_len;
869 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
870 numbufs = 1;
871 break;
872 case XPT_DEV_ADVINFO:
873 if (ccb->cdai.bufsiz == 0)
874 return (0);
875
876 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
877 lengths[0] = ccb->cdai.bufsiz;
878 dirs[0] = CAM_DIR_IN;
879 numbufs = 1;
880
881 /*
882 * This request will not go to the hardware, no reason
883 * to be so strict. vmapbuf() is able to map up to maxphys.
884 */
885 maxmap = maxphys;
886 break;
887 default:
888 return(EINVAL);
889 break; /* NOTREACHED */
890 }
891
892 /*
893 * Check the transfer length and permissions first, so we don't
894 * have to unmap any previously mapped buffers.
895 */
896 for (i = 0; i < numbufs; i++) {
897 if (lengths[i] > maxmap) {
898 printf("cam_periph_mapmem: attempt to map %lu bytes, "
899 "which is greater than %lu\n",
900 (long)(lengths[i]), (u_long)maxmap);
901 return (E2BIG);
902 }
903 }
904
905 /*
 906 	 * This keeps the kernel stack of the current thread from getting
907 * swapped. In low-memory situations where the kernel stack might
908 * otherwise get swapped out, this holds it and allows the thread
909 * to make progress and release the kernel mapped pages sooner.
910 *
911 * XXX KDM should I use P_NOSWAP instead?
912 */
913 PHOLD(curproc);
914
915 for (i = 0; i < numbufs; i++) {
916 /* Save the user's data address. */
917 mapinfo->orig[i] = *data_ptrs[i];
918
919 /*
920 * For small buffers use malloc+copyin/copyout instead of
921 * mapping to KVA to avoid expensive TLB shootdowns. For
922 * small allocations malloc is backed by UMA, and so much
923 * cheaper on SMP systems.
924 */
925 if (lengths[i] <= periph_mapmem_thresh &&
926 ccb->ccb_h.func_code != XPT_MMC_IO) {
927 *data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
928 M_WAITOK);
929 if (dirs[i] != CAM_DIR_IN) {
930 if (copyin(mapinfo->orig[i], *data_ptrs[i],
931 lengths[i]) != 0) {
932 free(*data_ptrs[i], M_CAMPERIPH);
933 *data_ptrs[i] = mapinfo->orig[i];
934 goto fail;
935 }
936 } else
937 bzero(*data_ptrs[i], lengths[i]);
938 continue;
939 }
940
941 /*
942 * Get the buffer.
943 */
944 mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);
945
946 /* set the direction */
947 mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
948 BIO_WRITE : BIO_READ;
949
950 /* Map the buffer into kernel memory. */
951 if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
952 uma_zfree(pbuf_zone, mapinfo->bp[i]);
953 goto fail;
954 }
955
956 /* set our pointer to the new mapped area */
957 *data_ptrs[i] = mapinfo->bp[i]->b_data;
958 }
959
960 /*
 961 	 * Now that we've gotten this far, change ownership of the buffers
 962 	 * to the kernel so that we don't run afoul of returning to user
963 * space with locks (on the buffer) held.
964 */
965 for (i = 0; i < numbufs; i++) {
966 if (mapinfo->bp[i])
967 BUF_KERNPROC(mapinfo->bp[i]);
968 }
969
970 mapinfo->num_bufs_used = numbufs;
971 return(0);
972
973 fail:
974 for (i--; i >= 0; i--) {
975 if (mapinfo->bp[i]) {
976 vunmapbuf(mapinfo->bp[i]);
977 uma_zfree(pbuf_zone, mapinfo->bp[i]);
978 } else
979 free(*data_ptrs[i], M_CAMPERIPH);
980 *data_ptrs[i] = mapinfo->orig[i];
981 }
982 PRELE(curproc);
983 return(EACCES);
984 }
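
/*
 * Callers bracket I/O submission with the map/unmap pair.  A condensed
 * sketch of the usual ioctl path (after the pass(4) driver; error handling
 * and flag fixups trimmed):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
 *	if (error != 0)
 *		return (error);
 *	cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO, SF_RETRY_UA,
 *	    softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 *
 * where xxerror is a stand-in for the driver's cam_periph_error() wrapper.
 */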
985
986 /*
987 * Unmap memory segments mapped into kernel virtual address space by
988 * cam_periph_mapmem().
989 */
990 void
991 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
992 {
993 int numbufs, i;
994 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
995 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
996 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
997
998 if (mapinfo->num_bufs_used <= 0) {
999 /* nothing to free and the process wasn't held. */
1000 return;
1001 }
1002
1003 switch (ccb->ccb_h.func_code) {
1004 case XPT_DEV_MATCH:
1005 if (ccb->cdm.pattern_buf_len > 0) {
1006 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
1007 lengths[0] = ccb->cdm.pattern_buf_len;
1008 dirs[0] = CAM_DIR_OUT;
1009 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
1010 lengths[1] = ccb->cdm.match_buf_len;
1011 dirs[1] = CAM_DIR_IN;
1012 numbufs = 2;
1013 } else {
1014 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
1015 lengths[0] = ccb->cdm.match_buf_len;
1016 dirs[0] = CAM_DIR_IN;
1017 numbufs = 1;
1018 }
1019 break;
1020 case XPT_SCSI_IO:
1021 case XPT_CONT_TARGET_IO:
1022 data_ptrs[0] = &ccb->csio.data_ptr;
1023 lengths[0] = ccb->csio.dxfer_len;
1024 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1025 numbufs = 1;
1026 break;
1027 case XPT_ATA_IO:
1028 data_ptrs[0] = &ccb->ataio.data_ptr;
1029 lengths[0] = ccb->ataio.dxfer_len;
1030 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1031 numbufs = 1;
1032 break;
1033 case XPT_MMC_IO:
1034 data_ptrs[0] = (u_int8_t **)&ccb->mmcio.cmd.data;
1035 lengths[0] = sizeof(struct mmc_data *);
1036 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1037 data_ptrs[1] = (u_int8_t **)&ccb->mmcio.cmd.data->data;
1038 lengths[1] = ccb->mmcio.cmd.data->len;
1039 dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
1040 numbufs = 2;
1041 break;
1042 case XPT_SMP_IO:
1043 data_ptrs[0] = &ccb->smpio.smp_request;
1044 lengths[0] = ccb->smpio.smp_request_len;
1045 dirs[0] = CAM_DIR_OUT;
1046 data_ptrs[1] = &ccb->smpio.smp_response;
1047 lengths[1] = ccb->smpio.smp_response_len;
1048 dirs[1] = CAM_DIR_IN;
1049 numbufs = 2;
1050 break;
1051 case XPT_NVME_IO:
1052 case XPT_NVME_ADMIN:
1053 data_ptrs[0] = &ccb->nvmeio.data_ptr;
1054 lengths[0] = ccb->nvmeio.dxfer_len;
1055 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1056 numbufs = 1;
1057 break;
1058 case XPT_DEV_ADVINFO:
1059 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1060 lengths[0] = ccb->cdai.bufsiz;
1061 dirs[0] = CAM_DIR_IN;
1062 numbufs = 1;
1063 break;
1064 default:
1065 /* allow ourselves to be swapped once again */
1066 PRELE(curproc);
1067 return;
1068 break; /* NOTREACHED */
1069 }
1070
1071 for (i = 0; i < numbufs; i++) {
1072 if (mapinfo->bp[i]) {
1073 /* unmap the buffer */
1074 vunmapbuf(mapinfo->bp[i]);
1075
1076 /* release the buffer */
1077 uma_zfree(pbuf_zone, mapinfo->bp[i]);
1078 } else {
1079 if (dirs[i] != CAM_DIR_OUT) {
1080 copyout(*data_ptrs[i], mapinfo->orig[i],
1081 lengths[i]);
1082 }
1083 free(*data_ptrs[i], M_CAMPERIPH);
1084 }
1085
1086 /* Set the user's pointer back to the original value */
1087 *data_ptrs[i] = mapinfo->orig[i];
1088 }
1089
1090 /* allow ourselves to be swapped once again */
1091 PRELE(curproc);
1092 }
1093
1094 int
1095 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1096 int (*error_routine)(union ccb *ccb,
1097 cam_flags camflags,
1098 u_int32_t sense_flags))
1099 {
1100 union ccb *ccb;
1101 int error;
1102 int found;
1103
1104 error = found = 0;
1105
1106 switch(cmd){
1107 case CAMGETPASSTHRU:
1108 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1109 xpt_setup_ccb(&ccb->ccb_h,
1110 ccb->ccb_h.path,
1111 CAM_PRIORITY_NORMAL);
1112 ccb->ccb_h.func_code = XPT_GDEVLIST;
1113
1114 /*
1115 * Basically, the point of this is that we go through
1116 * getting the list of devices, until we find a passthrough
1117 * device. In the current version of the CAM code, the
1118 * only way to determine what type of device we're dealing
1119 * with is by its name.
1120 */
1121 while (found == 0) {
1122 ccb->cgdl.index = 0;
1123 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1124 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1125 /* we want the next device in the list */
1126 xpt_action(ccb);
1127 if (strncmp(ccb->cgdl.periph_name,
1128 "pass", 4) == 0){
1129 found = 1;
1130 break;
1131 }
1132 }
1133 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1134 (found == 0)) {
1135 ccb->cgdl.periph_name[0] = '\0';
1136 ccb->cgdl.unit_number = 0;
1137 break;
1138 }
1139 }
1140
1141 /* copy the result back out */
1142 bcopy(ccb, addr, sizeof(union ccb));
1143
1144 /* and release the ccb */
1145 xpt_release_ccb(ccb);
1146
1147 break;
1148 default:
1149 error = ENOTTY;
1150 break;
1151 }
1152 return(error);
1153 }
1154
1155 static void
1156 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
1157 {
1158
1159 panic("%s: already done with ccb %p", __func__, done_ccb);
1160 }
1161
1162 static void
1163 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1164 {
1165
1166 /* Caller will release the CCB */
1167 xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
1168 done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
1169 wakeup(&done_ccb->ccb_h.cbfcnp);
1170 }
1171
1172 static void
1173 cam_periph_ccbwait(union ccb *ccb)
1174 {
1175
1176 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
1177 while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
1178 xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
1179 PRIBIO, "cbwait", 0);
1180 }
1181 KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
1182 (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
1183 ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
1184 "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
1185 ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
1186 }
1187
1188 /*
1189 * Dispatch a CCB and wait for it to complete. If the CCB has set a
1190 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
1191 */
1192 int
1193 cam_periph_runccb(union ccb *ccb,
1194 int (*error_routine)(union ccb *ccb,
1195 cam_flags camflags,
1196 u_int32_t sense_flags),
1197 cam_flags camflags, u_int32_t sense_flags,
1198 struct devstat *ds)
1199 {
1200 struct bintime *starttime;
1201 struct bintime ltime;
1202 int error;
1203 bool must_poll;
1204 uint32_t timeout = 1;
1205
1206 starttime = NULL;
1207 xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1208 KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
1209 ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
1210 ccb->ccb_h.func_code, ccb->ccb_h.flags));
1211
1212 /*
1213 * If the user has supplied a stats structure, and if we understand
1214 * this particular type of ccb, record the transaction start.
1215 */
1216 if (ds != NULL &&
1217 (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1218 ccb->ccb_h.func_code == XPT_ATA_IO ||
1219 ccb->ccb_h.func_code == XPT_NVME_IO)) {
1220 		starttime = &ltime;
1221 binuptime(starttime);
1222 devstat_start_transaction(ds, starttime);
1223 }
1224
1225 /*
1226 * We must poll the I/O while we're dumping. The scheduler is normally
1227 * stopped for dumping, except when we call doadump from ddb. While the
1228 * scheduler is running in this case, we still need to poll the I/O to
1229 * avoid sleeping waiting for the ccb to complete.
1230 *
1231 * A panic triggered dump stops the scheduler, any callback from the
1232 * shutdown_post_sync event will run with the scheduler stopped, but
1233 * before we're officially dumping. To avoid hanging in adashutdown
1234 * initiated commands (or other similar situations), we have to test for
1235 	 * either dumping or SCHEDULER_STOPPED() here.
1236 *
1237 * To avoid locking problems, dumping/polling callers must call
1238 * without a periph lock held.
1239 */
1240 must_poll = dumping || SCHEDULER_STOPPED();
1241 ccb->ccb_h.cbfcnp = cam_periph_done;
1242
1243 /*
1244 * If we're polling, then we need to ensure that we have ample resources
1245 * in the periph. cam_periph_error can reschedule the ccb by calling
1246 * xpt_action and returning ERESTART, so we have to effect the polling
1247 * in the do loop below.
1248 */
1249 if (must_poll) {
1250 timeout = xpt_poll_setup(ccb);
1251 }
1252
1253 if (timeout == 0) {
1254 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1255 error = EBUSY;
1256 } else {
1257 xpt_action(ccb);
1258 do {
1259 if (must_poll) {
1260 xpt_pollwait(ccb, timeout);
1261 timeout = ccb->ccb_h.timeout * 10;
1262 } else {
1263 cam_periph_ccbwait(ccb);
1264 }
1265 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1266 error = 0;
1267 else if (error_routine != NULL) {
1268 ccb->ccb_h.cbfcnp = cam_periph_done;
1269 error = (*error_routine)(ccb, camflags, sense_flags);
1270 } else
1271 error = 0;
1272 } while (error == ERESTART);
1273 }
1274
1275 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1276 cam_release_devq(ccb->ccb_h.path,
1277 /* relsim_flags */0,
1278 /* openings */0,
1279 /* timeout */0,
1280 /* getcount_only */ FALSE);
1281 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1282 }
1283
1284 if (ds != NULL) {
1285 uint32_t bytes;
1286 devstat_tag_type tag;
1287 bool valid = true;
1288
1289 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1290 bytes = ccb->csio.dxfer_len - ccb->csio.resid;
1291 tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
1292 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1293 bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
1294 tag = (devstat_tag_type)0;
1295 } else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
1296 			bytes = ccb->nvmeio.dxfer_len; /* NB: no resid possible */
1297 tag = (devstat_tag_type)0;
1298 } else {
1299 valid = false;
1300 }
1301 if (valid)
1302 devstat_end_transaction(ds, bytes, tag,
1303 ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
1304 DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1305 DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
1306 }
1307
1308 return(error);
1309 }
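
/*
 * For example, a driver can issue a synchronous TEST UNIT READY with its
 * periph lock held (sketch; xxerror is a hypothetical stand-in for the
 * driver's cam_periph_error() wrapper):
 *
 *	union ccb *ccb;
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 4, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	xpt_release_ccb(ccb);
 *
 * Any callback already in ccb_h.cbfcnp would be overwritten, as noted above.
 */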
1310
1311 void
1312 cam_freeze_devq(struct cam_path *path)
1313 {
1314 struct ccb_hdr ccb_h;
1315
1316 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1317 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1318 ccb_h.func_code = XPT_NOOP;
1319 ccb_h.flags = CAM_DEV_QFREEZE;
1320 xpt_action((union ccb *)&ccb_h);
1321 }
1322
1323 u_int32_t
1324 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1325 u_int32_t openings, u_int32_t arg,
1326 int getcount_only)
1327 {
1328 struct ccb_relsim crs;
1329
1330 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1331 relsim_flags, openings, arg, getcount_only));
1332 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1333 crs.ccb_h.func_code = XPT_REL_SIMQ;
1334 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1335 crs.release_flags = relsim_flags;
1336 crs.openings = openings;
1337 crs.release_timeout = arg;
1338 xpt_action((union ccb *)&crs);
1339 return (crs.qfrozen_cnt);
1340 }
1341
1342 #define saved_ccb_ptr ppriv_ptr0
1343 static void
1344 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1345 {
1346 union ccb *saved_ccb;
1347 cam_status status;
1348 struct scsi_start_stop_unit *scsi_cmd;
1349 int error = 0, error_code, sense_key, asc, ascq;
1350
1351 scsi_cmd = (struct scsi_start_stop_unit *)
1352 &done_ccb->csio.cdb_io.cdb_bytes;
1353 status = done_ccb->ccb_h.status;
1354
1355 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1356 if (scsi_extract_sense_ccb(done_ccb,
1357 &error_code, &sense_key, &asc, &ascq)) {
1358 /*
1359 * If the error is "invalid field in CDB",
1360 * and the load/eject flag is set, turn the
1361 * flag off and try again. This is just in
1362 * case the drive in question barfs on the
1363 * load eject flag. The CAM code should set
1364 * the load/eject flag by default for
1365 * removable media.
1366 */
1367 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1368 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1369 (asc == 0x24) && (ascq == 0x00)) {
1370 scsi_cmd->how &= ~SSS_LOEJ;
1371 if (status & CAM_DEV_QFRZN) {
1372 cam_release_devq(done_ccb->ccb_h.path,
1373 0, 0, 0, 0);
1374 done_ccb->ccb_h.status &=
1375 ~CAM_DEV_QFRZN;
1376 }
1377 xpt_action(done_ccb);
1378 goto out;
1379 }
1380 }
1381 error = cam_periph_error(done_ccb, 0,
1382 SF_RETRY_UA | SF_NO_PRINT);
1383 if (error == ERESTART)
1384 goto out;
1385 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1386 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1387 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1388 }
1389 } else {
1390 /*
1391 * If we have successfully taken a device from the not
1392 * ready to ready state, re-scan the device and re-get
1393 * the inquiry information. Many devices (mostly disks)
1394 * don't properly report their inquiry information unless
1395 * they are spun up.
1396 */
1397 if (scsi_cmd->opcode == START_STOP_UNIT)
1398 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1399 }
1400
1401 	/* If we tried a long wait and still failed, remember that. */
1402 if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
1403 (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
1404 periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
1405 if (error != 0 && done_ccb->ccb_h.retry_count == 0)
1406 periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
1407 }
1408
1409 /*
1410 * After recovery action(s) completed, return to the original CCB.
1411 * If the recovery CCB has failed, considering its own possible
1412 	 * retries and recovery, assume we are back in the state where we
1413 	 * were originally, but without recovery hopes left. In such a case,
1414 	 * after the final attempt below, we cancel any further retries, which
1415 	 * also blocks any new recovery attempts for this CCB, and the result
1416 	 * will be the final one returned to the CCB owner.
1417 */
1418 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1419 saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
1420 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1421 xpt_free_ccb(saved_ccb);
1422 if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1423 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1424 if (error != 0)
1425 done_ccb->ccb_h.retry_count = 0;
1426 xpt_action(done_ccb);
1427
1428 out:
1429 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1430 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1431 }
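
/*
 * Peripheral done routines use the same delegation idiom as above: handle
 * driver-specific statuses, then fall back to cam_periph_error().  A
 * fragment of such a routine might read (sketch, hypothetical driver):
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA);
 *	if (error == ERESTART)
 *		return;			(requeued; recovery in progress)
 *	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 *		cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, FALSE);
 */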
1432
1433 /*
1434 * Generic Async Event handler. Peripheral drivers usually
1435 * filter out the events that require personal attention,
1436 * and leave the rest to this function.
1437 */
1438 void
1439 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1440 struct cam_path *path, void *arg)
1441 {
1442 switch (code) {
1443 case AC_LOST_DEVICE:
1444 cam_periph_invalidate(periph);
1445 break;
1446 default:
1447 break;
1448 }
1449 }
1450
1451 void
1452 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1453 {
1454 struct ccb_getdevstats cgds;
1455
1456 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1457 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1458 xpt_action((union ccb *)&cgds);
1459 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1460 }
1461
1462 void
1463 cam_periph_freeze_after_event(struct cam_periph *periph,
1464 struct timeval* event_time, u_int duration_ms)
1465 {
1466 struct timeval delta;
1467 struct timeval duration_tv;
1468
1469 if (!timevalisset(event_time))
1470 return;
1471
1472 microtime(&delta);
1473 timevalsub(&delta, event_time);
1474 duration_tv.tv_sec = duration_ms / 1000;
1475 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1476 if (timevalcmp(&delta, &duration_tv, <)) {
1477 timevalsub(&duration_tv, &delta);
1478
1479 duration_ms = duration_tv.tv_sec * 1000;
1480 duration_ms += duration_tv.tv_usec / 1000;
1481 cam_freeze_devq(periph->path);
1482 cam_release_devq(periph->path,
1483 RELSIM_RELEASE_AFTER_TIMEOUT,
1484 /*reduction*/0,
1485 /*timeout*/duration_ms,
1486 /*getcount_only*/0);
1487 }
1488
1489 }
1490
1491 static int
1492 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1493 cam_flags camflags, u_int32_t sense_flags,
1494 int *openings, u_int32_t *relsim_flags,
1495 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1496 {
1497 struct cam_periph *periph;
1498 int error;
1499
1500 switch (ccb->csio.scsi_status) {
1501 case SCSI_STATUS_OK:
1502 case SCSI_STATUS_COND_MET:
1503 case SCSI_STATUS_INTERMED:
1504 case SCSI_STATUS_INTERMED_COND_MET:
1505 error = 0;
1506 break;
1507 case SCSI_STATUS_CMD_TERMINATED:
1508 case SCSI_STATUS_CHECK_COND:
1509 error = camperiphscsisenseerror(ccb, orig_ccb,
1510 camflags,
1511 sense_flags,
1512 openings,
1513 relsim_flags,
1514 timeout,
1515 action,
1516 action_string);
1517 break;
1518 case SCSI_STATUS_QUEUE_FULL:
1519 {
1520 /* no decrement */
1521 struct ccb_getdevstats cgds;
1522
1523 /*
1524 * First off, find out what the current
1525 * transaction counts are.
1526 */
1527 xpt_setup_ccb(&cgds.ccb_h,
1528 ccb->ccb_h.path,
1529 CAM_PRIORITY_NORMAL);
1530 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1531 xpt_action((union ccb *)&cgds);
1532
1533 /*
1534 * If we were the only transaction active, treat
1535 * the QUEUE FULL as if it were a BUSY condition.
1536 */
1537 if (cgds.dev_active != 0) {
1538 int total_openings;
1539
1540 /*
1541 * Reduce the number of openings to
1542 * be 1 less than the amount it took
1543 			 * to get a queue full, bounded by the
1544 * minimum allowed tag count for this
1545 * device.
1546 */
1547 total_openings = cgds.dev_active + cgds.dev_openings;
1548 *openings = cgds.dev_active;
1549 if (*openings < cgds.mintags)
1550 *openings = cgds.mintags;
1551 if (*openings < total_openings)
1552 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1553 else {
1554 /*
1555 * Some devices report queue full for
1556 * temporary resource shortages. For
1557 * this reason, we allow a minimum
1558 * tag count to be entered via a
1559 * quirk entry to prevent the queue
1560 * count on these devices from falling
1561 			 * to a pessimistically low value. We
1562 * still wait for the next successful
1563 * completion, however, before queueing
1564 * more transactions to the device.
1565 */
1566 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1567 }
1568 *timeout = 0;
1569 error = ERESTART;
1570 *action &= ~SSQ_PRINT_SENSE;
1571 break;
1572 }
1573 /* FALLTHROUGH */
1574 }
1575 case SCSI_STATUS_BUSY:
1576 /*
1577 * Restart the queue after either another
1578 * command completes or a 1 second timeout.
1579 */
1580 periph = xpt_path_periph(ccb->ccb_h.path);
1581 if (periph->flags & CAM_PERIPH_INVALID) {
1582 error = EIO;
1583 *action_string = "Periph was invalidated";
1584 } else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1585 ccb->ccb_h.retry_count > 0) {
1586 if ((sense_flags & SF_RETRY_BUSY) == 0)
1587 ccb->ccb_h.retry_count--;
1588 error = ERESTART;
1589 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1590 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1591 *timeout = 1000;
1592 } else {
1593 error = EIO;
1594 *action_string = "Retries exhausted";
1595 }
1596 break;
1597 case SCSI_STATUS_RESERV_CONFLICT:
1598 default:
1599 error = EIO;
1600 break;
1601 }
1602 return (error);
1603 }
1604
1605 static int
1606 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1607 cam_flags camflags, u_int32_t sense_flags,
1608 int *openings, u_int32_t *relsim_flags,
1609 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1610 {
1611 struct cam_periph *periph;
1612 union ccb *orig_ccb = ccb;
1613 int error, recoveryccb;
1614
1615 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1616 if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
1617 biotrack(ccb->csio.bio, __func__);
1618 #endif
1619
1620 periph = xpt_path_periph(ccb->ccb_h.path);
1621 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1622 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1623 /*
1624 * If error recovery is already in progress, don't attempt
1625 * to process this error, but requeue it unconditionally
1626 * and attempt to process it once error recovery has
1627 * completed. This failed command is probably related to
1628 * the error that caused the currently active error recovery
1629 * action so our current recovery efforts should also
1630 * address this command. Be aware that the error recovery
1631 * code assumes that only one recovery action is in progress
1632 * on a particular peripheral instance at any given time
1633 * (e.g. only one saved CCB for error recovery) so it is
1634 		 * imperative that we don't violate this assumption.
1635 */
1636 error = ERESTART;
1637 *action &= ~SSQ_PRINT_SENSE;
1638 } else {
1639 scsi_sense_action err_action;
1640 struct ccb_getdev cgd;
1641
1642 /*
1643 * Grab the inquiry data for this device.
1644 */
1645 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1646 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1647 xpt_action((union ccb *)&cgd);
1648
1649 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1650 sense_flags);
1651 error = err_action & SS_ERRMASK;
1652
1653 /*
1654 * Do not autostart sequential access devices
1655 * to avoid unexpected tape loading.
1656 */
1657 if ((err_action & SS_MASK) == SS_START &&
1658 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1659 *action_string = "Will not autostart a "
1660 "sequential access device";
1661 goto sense_error_done;
1662 }
1663
1664 /*
1665 * Avoid recovery recursion if recovery action is the same.
1666 */
1667 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1668 if (((err_action & SS_MASK) == SS_START &&
1669 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1670 ((err_action & SS_MASK) == SS_TUR &&
1671 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1672 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1673 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1674 *timeout = 500;
1675 }
1676 }
1677
1678 /*
1679 * If the recovery action will consume a retry,
1680 * make sure we actually have retries available.
1681 */
1682 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1683 if (ccb->ccb_h.retry_count > 0 &&
1684 (periph->flags & CAM_PERIPH_INVALID) == 0)
1685 ccb->ccb_h.retry_count--;
1686 else {
1687 *action_string = "Retries exhausted";
1688 goto sense_error_done;
1689 }
1690 }
1691
1692 if ((err_action & SS_MASK) >= SS_START) {
1693 /*
1694 * Do common portions of commands that
1695 * use recovery CCBs.
1696 */
1697 orig_ccb = xpt_alloc_ccb_nowait();
1698 if (orig_ccb == NULL) {
1699 *action_string = "Can't allocate recovery CCB";
1700 goto sense_error_done;
1701 }
1702 /*
1703 * Clear freeze flag for original request here, as
1704 * this freeze will be dropped as part of ERESTART.
1705 */
1706 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1707 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1708 }
1709
1710 switch (err_action & SS_MASK) {
1711 case SS_NOP:
1712 *action_string = "No recovery action needed";
1713 error = 0;
1714 break;
1715 case SS_RETRY:
1716 *action_string = "Retrying command (per sense data)";
1717 error = ERESTART;
1718 break;
1719 case SS_FAIL:
1720 *action_string = "Unretryable error";
1721 break;
1722 case SS_START:
1723 {
1724 int le;
1725
1726 /*
1727 * Send a start unit command to the device, and
1728 * then retry the command.
1729 */
1730 *action_string = "Attempting to start unit";
1731 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1732
1733 /*
1734 * Check for removable media and set
1735 * load/eject flag appropriately.
1736 */
1737 if (SID_IS_REMOVABLE(&cgd.inq_data))
1738 le = TRUE;
1739 else
1740 le = FALSE;
1741
1742 scsi_start_stop(&ccb->csio,
1743 /*retries*/1,
1744 camperiphdone,
1745 MSG_SIMPLE_Q_TAG,
1746 /*start*/TRUE,
1747 /*load/eject*/le,
1748 /*immediate*/FALSE,
1749 SSD_FULL_SIZE,
1750 /*timeout*/50000);
1751 break;
1752 }
1753 case SS_TUR:
1754 {
1755 /*
1756 * Send a Test Unit Ready to the device.
1757 * If the 'many' flag is set, we send 120
1758 * test unit ready commands, one every half
1759 * second. Otherwise, we just send one TUR.
1760 * We only want to do this if the retry
1761 * count has not been exhausted.
1762 */
1763 int retries;
1764
1765 if ((err_action & SSQ_MANY) != 0 && (periph->flags &
1766 CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
1767 periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
1768 *action_string = "Polling device for readiness";
1769 retries = 120;
1770 } else {
1771 *action_string = "Testing device for readiness";
1772 retries = 1;
1773 }
1774 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1775 scsi_test_unit_ready(&ccb->csio,
1776 retries,
1777 camperiphdone,
1778 MSG_SIMPLE_Q_TAG,
1779 SSD_FULL_SIZE,
1780 /*timeout*/5000);
1781
1782 /*
1783 * Accomplish our 500ms delay by deferring
1784 * the release of our device queue appropriately.
1785 */
1786 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1787 *timeout = 500;
1788 break;
1789 }
1790 default:
1791 panic("Unhandled error action %x", err_action);
1792 }
1793
1794 if ((err_action & SS_MASK) >= SS_START) {
1795 /*
1796 * Drop the priority, so that the recovery
1797 * CCB is the first to execute. Freeze the queue
1798 * after this command is sent so that we can
1799 * restore the old csio and have it queued in
1800 * the proper order before we release normal
1801 * transactions to the device.
1802 */
1803 ccb->ccb_h.pinfo.priority--;
1804 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1805 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1806 error = ERESTART;
1807 *orig = orig_ccb;
1808 }
1809
1810 sense_error_done:
1811 *action = err_action;
1812 }
1813 return (error);
1814 }
1815
1816 /*
1817 * Generic error handler. Peripheral drivers usually filter
1818 * out the errors that they handle in a unique manner, then
1819 * call this function.
1820 */
1821 int
1822 cam_periph_error(union ccb *ccb, cam_flags camflags,
1823 u_int32_t sense_flags)
1824 {
1825 struct cam_path *newpath;
1826 union ccb *orig_ccb, *scan_ccb;
1827 struct cam_periph *periph;
1828 const char *action_string;
1829 cam_status status;
1830 int frozen, error, openings, devctl_err;
1831 u_int32_t action, relsim_flags, timeout;
1832
1833 action = SSQ_PRINT_SENSE;
1834 periph = xpt_path_periph(ccb->ccb_h.path);
1835 action_string = NULL;
1836 status = ccb->ccb_h.status;
1837 frozen = (status & CAM_DEV_QFRZN) != 0;
1838 status &= CAM_STATUS_MASK;
1839 devctl_err = openings = relsim_flags = timeout = 0;
1840 orig_ccb = ccb;
1841
1842 /* Filter the errors that should be reported via devctl */
1843 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1844 case CAM_CMD_TIMEOUT:
1845 case CAM_REQ_ABORTED:
1846 case CAM_REQ_CMP_ERR:
1847 case CAM_REQ_TERMIO:
1848 case CAM_UNREC_HBA_ERROR:
1849 case CAM_DATA_RUN_ERR:
1850 case CAM_SCSI_STATUS_ERROR:
1851 case CAM_ATA_STATUS_ERROR:
1852 case CAM_SMP_STATUS_ERROR:
1853 devctl_err++;
1854 break;
1855 default:
1856 break;
1857 }
1858
1859 switch (status) {
1860 case CAM_REQ_CMP:
1861 error = 0;
1862 action &= ~SSQ_PRINT_SENSE;
1863 break;
1864 case CAM_SCSI_STATUS_ERROR:
1865 error = camperiphscsistatuserror(ccb, &orig_ccb,
1866 camflags, sense_flags, &openings, &relsim_flags,
1867 &timeout, &action, &action_string);
1868 break;
1869 case CAM_AUTOSENSE_FAIL:
1870 error = EIO; /* we have to kill the command */
1871 break;
1872 case CAM_UA_ABORT:
1873 case CAM_UA_TERMIO:
1874 case CAM_MSG_REJECT_REC:
1875 /* XXX Don't know that these are correct */
1876 error = EIO;
1877 break;
1878 case CAM_SEL_TIMEOUT:
1879 if ((camflags & CAM_RETRY_SELTO) != 0) {
1880 if (ccb->ccb_h.retry_count > 0 &&
1881 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1882 ccb->ccb_h.retry_count--;
1883 error = ERESTART;
1884
1885 /*
1886 * Wait a bit to give the device
1887 * time to recover before we try again.
1888 */
1889 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1890 timeout = periph_selto_delay;
1891 break;
1892 }
1893 action_string = "Retries exhausted";
1894 }
1895 /* FALLTHROUGH */
1896 case CAM_DEV_NOT_THERE:
1897 error = ENXIO;
1898 action = SSQ_LOST;
1899 break;
1900 case CAM_REQ_INVALID:
1901 case CAM_PATH_INVALID:
1902 case CAM_NO_HBA:
1903 case CAM_PROVIDE_FAIL:
1904 case CAM_REQ_TOO_BIG:
1905 case CAM_LUN_INVALID:
1906 case CAM_TID_INVALID:
1907 case CAM_FUNC_NOTAVAIL:
1908 error = EINVAL;
1909 break;
1910 case CAM_SCSI_BUS_RESET:
1911 case CAM_BDR_SENT:
1912 /*
1913 * Commands that repeatedly time out and cause these
1914 * kinds of error recovery actions should return
1915 * CAM_CMD_TIMEOUT, which allows us to safely assume
1916 * that this command was an innocent bystander to
1917 * these events and should be unconditionally
1918 * retried.
1919 */
1920 case CAM_REQUEUE_REQ:
1921 /* Unconditional requeue if device is still there */
1922 if (periph->flags & CAM_PERIPH_INVALID) {
1923 action_string = "Periph was invalidated";
1924 error = EIO;
1925 } else if (sense_flags & SF_NO_RETRY) {
1926 error = EIO;
1927 action_string = "Retry was blocked";
1928 } else {
1929 error = ERESTART;
1930 action &= ~SSQ_PRINT_SENSE;
1931 }
1932 break;
1933 case CAM_RESRC_UNAVAIL:
1934 /* Wait a bit for the resource shortage to abate. */
1935 timeout = periph_noresrc_delay;
1936 /* FALLTHROUGH */
1937 case CAM_BUSY:
1938 if (timeout == 0) {
1939 /* Wait a bit for the busy condition to abate. */
1940 timeout = periph_busy_delay;
1941 }
1942 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1943 /* FALLTHROUGH */
1944 case CAM_ATA_STATUS_ERROR:
1945 case CAM_REQ_CMP_ERR:
1946 case CAM_CMD_TIMEOUT:
1947 case CAM_UNEXP_BUSFREE:
1948 case CAM_UNCOR_PARITY:
1949 case CAM_DATA_RUN_ERR:
1950 default:
1951 if (periph->flags & CAM_PERIPH_INVALID) {
1952 error = EIO;
1953 action_string = "Periph was invalidated";
1954 } else if (ccb->ccb_h.retry_count == 0) {
1955 error = EIO;
1956 action_string = "Retries exhausted";
1957 } else if (sense_flags & SF_NO_RETRY) {
1958 error = EIO;
1959 action_string = "Retry was blocked";
1960 } else {
1961 ccb->ccb_h.retry_count--;
1962 error = ERESTART;
1963 }
1964 break;
1965 }
1966
1967 if ((sense_flags & SF_PRINT_ALWAYS) ||
1968 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1969 action |= SSQ_PRINT_SENSE;
1970 else if (sense_flags & SF_NO_PRINT)
1971 action &= ~SSQ_PRINT_SENSE;
1972 if ((action & SSQ_PRINT_SENSE) != 0)
1973 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1974 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
1975 if (error != ERESTART) {
1976 if (action_string == NULL)
1977 action_string = "Unretryable error";
1978 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1979 error, action_string);
1980 } else if (action_string != NULL)
1981 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1982 else {
1983 xpt_print(ccb->ccb_h.path,
1984 "Retrying command, %d more tries remain\n",
1985 ccb->ccb_h.retry_count);
1986 }
1987 }
1988
1989 if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
1990 cam_periph_devctl_notify(orig_ccb);
1991
1992 if ((action & SSQ_LOST) != 0) {
1993 lun_id_t lun_id;
1994
1995 /*
1996 * For a selection timeout, we consider all of the LUNs on
1997 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
1998 * then we only get rid of the device(s) specified by the
1999 * path in the original CCB.
2000 */
2001 if (status == CAM_SEL_TIMEOUT)
2002 lun_id = CAM_LUN_WILDCARD;
2003 else
2004 lun_id = xpt_path_lun_id(ccb->ccb_h.path);
2005
2006 /* Should we do more if we can't create the path? */
2007 if (xpt_create_path(&newpath, periph,
2008 xpt_path_path_id(ccb->ccb_h.path),
2009 xpt_path_target_id(ccb->ccb_h.path),
2010 lun_id) == CAM_REQ_CMP) {
2011 /*
2012 * Let peripheral drivers know that this
2013 * device has gone away.
2014 */
2015 xpt_async(AC_LOST_DEVICE, newpath, NULL);
2016 xpt_free_path(newpath);
2017 }
2018 }
2019
2020 /* Broadcast UNIT ATTENTIONs to all periphs. */
2021 if ((action & SSQ_UA) != 0)
2022 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
2023
2024 /* Rescan target on "Reported LUNs data has changed" */
2025 if ((action & SSQ_RESCAN) != 0) {
2026 if (xpt_create_path(&newpath, NULL,
2027 xpt_path_path_id(ccb->ccb_h.path),
2028 xpt_path_target_id(ccb->ccb_h.path),
2029 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2030 scan_ccb = xpt_alloc_ccb_nowait();
2031 if (scan_ccb != NULL) {
2032 scan_ccb->ccb_h.path = newpath;
2033 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
2034 scan_ccb->crcn.flags = 0;
2035 xpt_rescan(scan_ccb);
2036 } else {
2037 xpt_print(newpath,
2038 "Can't allocate CCB to rescan target\n");
2039 xpt_free_path(newpath);
2040 }
2041 }
2042 }
2043
2044 /* Attempt a retry */
2045 if (error == ERESTART || error == 0) {
2046 if (frozen != 0)
2047 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2048 if (error == ERESTART)
2049 xpt_action(ccb);
2050 if (frozen != 0)
2051 cam_release_devq(ccb->ccb_h.path,
2052 relsim_flags,
2053 openings,
2054 timeout,
2055 /*getcount_only*/0);
2056 }
2057
2058 return (error);
2059 }
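/*
 * Illustrative sketch of the calling pattern described above: a
 * hypothetical periph driver ("xx"; all names invented) screens for
 * the errors it wants to handle specially, then defers the rest to
 * cam_periph_error().  Compare daerror()/adaerror() in the real
 * drivers.
 */
static int
xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	/*
	 * Driver-specific filtering (quirk handling, media state
	 * checks, ...) would go here before falling through to the
	 * generic handler.
	 */
	return (cam_periph_error(ccb, cam_flags, sense_flags));
}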
2060
2061 #define CAM_PERIPH_DEVD_MSG_SIZE 256
2062
2063 static void
2064 cam_periph_devctl_notify(union ccb *ccb)
2065 {
2066 struct cam_periph *periph;
2067 struct ccb_getdev *cgd;
2068 struct sbuf sb;
2069 int serr, sk, asc, ascq;
2070 char *sbmsg, *type;
2071
2072 sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
2073 if (sbmsg == NULL)
2074 return;
2075
2076 sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
2077
2078 periph = xpt_path_periph(ccb->ccb_h.path);
2079 sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
2080 periph->unit_number);
2081
2082 sbuf_printf(&sb, "serial=\"");
2083 if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
2084 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
2085 CAM_PRIORITY_NORMAL);
2086 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2087 xpt_action((union ccb *)cgd);
2088
2089 if (cgd->ccb_h.status == CAM_REQ_CMP)
2090 sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
2091 xpt_free_ccb((union ccb *)cgd);
2092 }
2093 sbuf_printf(&sb, "\" ");
2094 sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
2095
2096 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2097 case CAM_CMD_TIMEOUT:
2098 sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
2099 type = "timeout";
2100 break;
2101 case CAM_SCSI_STATUS_ERROR:
2102 sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
2103 if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
2104 sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
2105 serr, sk, asc, ascq);
2106 type = "error";
2107 break;
2108 case CAM_ATA_STATUS_ERROR:
2109 sbuf_printf(&sb, "RES=\"");
2110 ata_res_sbuf(&ccb->ataio.res, &sb);
2111 sbuf_printf(&sb, "\" ");
2112 type = "error";
2113 break;
2114 default:
2115 type = "error";
2116 break;
2117 }
2118
2119 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2120 sbuf_printf(&sb, "CDB=\"");
2121 scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
2122 sbuf_printf(&sb, "\" ");
2123 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
2124 sbuf_printf(&sb, "ACB=\"");
2125 ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
2126 sbuf_printf(&sb, "\" ");
2127 }
2128
2129 if (sbuf_finish(&sb) == 0)
2130 devctl_notify("CAM", "periph", type, sbuf_data(&sb));
2131 sbuf_delete(&sb);
2132 free(sbmsg, M_CAMPERIPH);
2133 }
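/*
 * Illustrative only: the message assembled above reaches userland via
 * devd(8) with system "CAM", subsystem "periph", and the "type"
 * selected in the switch; the key=value pairs become devd variables.
 * A devd.conf(5) stanza like the following (the action is a made-up
 * example) could react to command timeouts:
 *
 *	notify 10 {
 *		match "system"		"CAM";
 *		match "subsystem"	"periph";
 *		match "type"		"timeout";
 *		action "logger -p kern.warn 'CAM timeout on $device'";
 *	};
 */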
2134
2135 /*
2136 * Sysctl to force an invalidation of the drive right now. Can be
2137 * called with CTLFLAG_MPSAFE since we take the periph lock.
2138 */
2139 int
2140 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
2141 {
2142 struct cam_periph *periph;
2143 int error, value;
2144
2145 periph = arg1;
2146 value = 0;
2147 error = sysctl_handle_int(oidp, &value, 0, req);
2148 if (error != 0 || req->newptr == NULL || value != 1)
2149 return (error);
2150
2151 cam_periph_lock(periph);
2152 cam_periph_invalidate(periph);
2153 cam_periph_unlock(periph);
2154
2155 return (0);
2156 }
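/*
 * Illustrative sketch (struct and function names invented): how a
 * periph driver might expose this handler in its per-instance sysctl
 * tree, after which "sysctl kern.cam.xx.0.invalidate=1" would
 * invalidate the device.
 */
struct xx_softc {
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
};

static void
xxsysctlinit(struct cam_periph *periph, struct xx_softc *softc)
{
	SYSCTL_ADD_PROC(&softc->sysctl_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    periph, 0, cam_periph_invalidate_sysctl, "I",
	    "Write 1 to invalidate the drive");
}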