sys/cam/cam_periph.c

/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int	camperiphnextunit(struct periph_driver *p_drv,
		    u_int newunit, int wired,
		    path_id_t pathid, target_id_t target,
		    lun_id_t lun);
static u_int	camperiphunit(struct periph_driver *p_drv,
		    path_id_t pathid, target_id_t target,
		    lun_id_t lun);
static void	camperiphdone(struct cam_periph *periph,
		    union ccb *done_ccb);
static void	camperiphfree(struct cam_periph *periph);
static int	camperiphscsistatuserror(union ccb *ccb,
		    union ccb **orig_ccb,
		    cam_flags camflags,
		    u_int32_t sense_flags,
		    int *openings,
		    u_int32_t *relsim_flags,
		    u_int32_t *timeout,
		    u_int32_t *action,
		    const char **action_string);
static int	camperiphscsisenseerror(union ccb *ccb,
		    union ccb **orig_ccb,
		    cam_flags camflags,
		    u_int32_t sense_flags,
		    int *openings,
		    u_int32_t *relsim_flags,
		    u_int32_t *timeout,
		    u_int32_t *action,
		    const char **action_string);
static void	cam_periph_devctl_notify(union ccb *ccb);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

static u_int periph_mapmem_thresh = 65536;
SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
    &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
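
/*
 * Example (illustrative): the delays above are loader tunables, and the
 * mapping threshold is additionally a read-write sysctl (CTLFLAG_RWTUN),
 * so both can be changed without recompiling, e.g.:
 *
 *	kern.cam.periph_selto_delay="2000"	(in loader.conf, milliseconds)
 *	sysctl kern.cam.mapmem_thresh=131072	(at runtime)
 */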

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
	    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		    sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}
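
/*
 * Example (illustrative sketch): peripheral drivers normally reach
 * periphdriver_register() through the PERIPHDRIVER_DECLARE() macro from
 * cam_periph.h rather than calling it directly.  The "foo" names below
 * are hypothetical:
 *
 *	static periph_init_t fooinit;
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units)
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */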

int
periphdriver_unregister(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	int error, n;

	/* If driver marked as early or it is late now, deinitialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1) {
		if (drv->deinit == NULL) {
			printf("CAM periph driver '%s' doesn't have deinit.\n",
			    drv->driver_name);
			return (EOPNOTSUPP);
		}
		error = drv->deinit();
		if (error != 0)
			return (error);
	}

	xpt_lock_buses();
	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
		;
	KASSERT(n < nperiph_drivers,
	    ("Periph driver '%s' was not registered", drv->driver_name));
	for (; n + 1 < nperiph_drivers; n++)
		periph_drivers[n] = periph_drivers[n + 1];
	periph_drivers[n + 1] = NULL;
	nperiph_drivers--;
	xpt_unlock_buses();
	return (0);
}

void
periphdriver_init(int level)
{
	int i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			    "valid device %s%d rejected flags %#x "
			    "refcount %d\n", periph->periph_name,
			    periph->unit_number, periph->flags,
			    periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
	    M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) name.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				cam_periph_assert(periph, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
			    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	if (sbuf_len(sb) != 0)
		sbuf_cat(sb, ",");
	sbuf_cat(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}

int
cam_periph_acquire(struct cam_periph *periph)
{
	int status;

	if (periph == NULL)
		return (EINVAL);

	status = ENOENT;
	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = 0;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}
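
/*
 * Example (illustrative): a typical acquire/release pairing around work
 * that must not outlive the peripheral:
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);		(periph was already invalidated)
 *	... hand periph to a callout, task, or in-flight CCB ...
 *	cam_periph_release(periph);	(from the completion path)
 */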

/*
 * hold/unhold act as mutual exclusion for sections of the code that
 * need to sleep and want to make sure that other sections that
 * will interfere are held off.  This only protects exclusive sections
 * from each other.
 */
int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != 0)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
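
/*
 * Example (illustrative sketch): an open routine serializing against other
 * exclusive sections with hold/unhold.  cam_periph_hold() must be called
 * with the periph lock held, since it may sleep:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... probe media, update softc state ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */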

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
    path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
    target_id_t target, lun_id_t lun)
{
	u_int unit;
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
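
/*
 * Example (illustrative): "wired" unit numbers come from loader hints of
 * the form matched by the resource_*_value() calls above, e.g. to reserve
 * da4 for the device at scbus0 target 5 lun 0:
 *
 *	hint.da.4.at="scbus0"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */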

void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
		struct sbuf sb;
		char buffer[160];

		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
		xpt_denounce_periph_sbuf(periph, &sb);
		sbuf_finish(&sb);
		sbuf_putbuf(&sb);
	}
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;
	struct periph_driver *drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}
	/*
	 * Cache a pointer to the periph_driver structure.  If a
	 * periph_driver is added or removed from the array (see
	 * periphdriver_register()) while we drop the topology lock
	 * below, p_drv may change.  This doesn't protect against this
	 * particular periph_driver going away.  That will require full
	 * reference counting in the periph_driver infrastructure.
	 */
	drv = *p_drv;

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed and that there will be no more reference
	 * count checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&drv->units, periph, unit_links);
	drv->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			xpt_path_inq(&ccb.cpi, periph->path);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
		    periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	bool misaligned[CAM_PERIPH_MAXMAPS];

	bzero(mapinfo, sizeof(*mapinfo));
	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > MAXPHYS)
		maxmap = MAXPHYS;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			    "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		/* Two mappings: one for cmd->data and one for cmd->data->data */
		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		if (lengths[i] > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			    "which is greater than %lu\n",
			    (u_long)(lengths[i]), (u_long)maxmap);
			return (E2BIG);
		}

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		misaligned[i] = (lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > MAXPHYS);
	}

	/*
	 * This keeps the kernel stack of current thread from getting
	 * swapped.  In low-memory situations where the kernel stack might
	 * otherwise get swapped out, this holds it and allows the thread
	 * to make progress and release the kernel mapped pages sooner.
	 *
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/* Save the user's data address. */
		mapinfo->orig[i] = *data_ptrs[i];

		/*
		 * For small buffers use malloc+copyin/copyout instead of
		 * mapping to KVA to avoid expensive TLB shootdowns.  For
		 * small allocations malloc is backed by UMA, and so much
		 * cheaper on SMP systems.
		 */
		if ((lengths[i] <= periph_mapmem_thresh || misaligned[i]) &&
		    ccb->ccb_h.func_code != XPT_MMC_IO) {
			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
			    M_WAITOK);
			if (dirs[i] != CAM_DIR_IN) {
				if (copyin(mapinfo->orig[i], *data_ptrs[i],
				    lengths[i]) != 0) {
					free(*data_ptrs[i], M_CAMPERIPH);
					*data_ptrs[i] = mapinfo->orig[i];
					goto fail;
				}
			} else
				bzero(*data_ptrs[i], lengths[i]);
			continue;
		}

		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
		    BIO_WRITE : BIO_READ;

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
			relpbuf(mapinfo->bp[i], NULL);
			goto fail;
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i])
			BUF_KERNPROC(mapinfo->bp[i]);
	}

	mapinfo->num_bufs_used = numbufs;
	return(0);

fail:
	for (i--; i >= 0; i--) {
		if (mapinfo->bp[i]) {
			vunmapbuf(mapinfo->bp[i]);
			relpbuf(mapinfo->bp[i], NULL);
		} else
			free(*data_ptrs[i], M_CAMPERIPH);
		*data_ptrs[i] = mapinfo->orig[i];
	}
	PRELE(curproc);
	return(EACCES);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		data_ptrs[0] = (u_int8_t **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (u_int8_t **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i]) {
			/* unmap the buffer */
			vunmapbuf(mapinfo->bp[i]);

			/* release the buffer */
			relpbuf(mapinfo->bp[i], NULL);
		} else {
			if (dirs[i] != CAM_DIR_OUT) {
				copyout(*data_ptrs[i], mapinfo->orig[i],
				    lengths[i]);
			}
			free(*data_ptrs[i], M_CAMPERIPH);
		}

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->orig[i];
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
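
/*
 * Example (illustrative sketch): the usual lifecycle for a user-supplied
 * CCB, similar to the pass(4) driver's CCB ioctl path; error handling and
 * locking trimmed, and "passerror"/"softc" stand in for driver-specific
 * names:
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxmap);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, passerror, CAM_RETRY_SELTO,
 *		    SF_RETRY_UA, softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */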

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
    int (*error_routine)(union ccb *ccb,
			 cam_flags camflags,
			 u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
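
/*
 * Example (illustrative): a userland consumer can ask a peripheral whose
 * ioctl routine forwards to cam_periph_ioctl() for its pass(4) sibling,
 * given "fd" open on that peripheral's device node:
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0)
 *		printf("%s%d\n", ccb.cgdl.periph_name,
 *		    ccb.cgdl.unit_number);
 */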

static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{

	panic("%s: already done with ccb %p", __func__, done_ccb);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
			    PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
}

/*
 * Dispatch a CCB and wait for it to complete.  If the CCB has set a
 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
 */
int
cam_periph_runccb(union ccb *ccb,
    int (*error_routine)(union ccb *ccb,
			 cam_flags camflags,
			 u_int32_t sense_flags),
    cam_flags camflags, u_int32_t sense_flags,
    struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;
	bool must_poll;
	uint32_t timeout = 1;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
	     ccb->ccb_h.func_code, ccb->ccb_h.flags));

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if (ds != NULL &&
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO ||
	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	/*
	 * We must poll the I/O while we're dumping.  The scheduler is normally
	 * stopped for dumping, except when we call doadump from ddb.  While the
	 * scheduler is running in this case, we still need to poll the I/O to
	 * avoid sleeping waiting for the ccb to complete.
	 *
	 * A panic triggered dump stops the scheduler, any callback from the
	 * shutdown_post_sync event will run with the scheduler stopped, but
	 * before we're officially dumping.  To avoid hanging in adashutdown
	 * initiated commands (or other similar situations), we have to test
	 * for either dumping or SCHEDULER_STOPPED() here as well.
	 *
	 * To avoid locking problems, dumping/polling callers must call
	 * without a periph lock held.
	 */
	must_poll = dumping || SCHEDULER_STOPPED();
	ccb->ccb_h.cbfcnp = cam_periph_done;

	/*
	 * If we're polling, then we need to ensure that we have ample resources
	 * in the periph.  cam_periph_error can reschedule the ccb by calling
	 * xpt_action and returning ERESTART, so we have to effect the polling
	 * in the do loop below.
	 */
	if (must_poll) {
		timeout = xpt_poll_setup(ccb);
	}

	if (timeout == 0) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		error = EBUSY;
	} else {
		xpt_action(ccb);
		do {
			if (must_poll) {
				xpt_pollwait(ccb, timeout);
				timeout = ccb->ccb_h.timeout * 10;
			} else {
				cam_periph_ccbwait(ccb);
			}
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
				error = 0;
			else if (error_routine != NULL) {
				ccb->ccb_h.cbfcnp = cam_periph_done;
				error = (*error_routine)(ccb, camflags, sense_flags);
			} else
				error = 0;
		} while (error == ERESTART);
	}

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
		    /* relsim_flags */0,
		    /* openings */0,
		    /* timeout */0,
		    /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		uint32_t bytes;
		devstat_tag_type tag;
		bool valid = true;

		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
			tag = (devstat_tag_type)0;
		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
			tag = (devstat_tag_type)0;
		} else {
			valid = false;
		}
		if (valid)
			devstat_end_transaction(ds, bytes, tag,
			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
	}

	return(error);
}
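
/*
 * Example (illustrative sketch): synchronously issuing a TEST UNIT READY
 * from periph context.  cam_periph_runccb() sleeps for completion (or
 * polls while dumping) and feeds failures to the error routine; "myerror"
 * is a hypothetical error callback of the usual signature:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5 * 1000);
 *	error = cam_periph_runccb(ccb, myerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, NULL);
 *	xpt_release_ccb(ccb);
 */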

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
    u_int32_t openings, u_int32_t arg,
    int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	struct scsi_start_stop_unit *scsi_cmd;
	int error = 0, error_code, sense_key, asc, ascq;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		error = cam_periph_error(done_ccb, 0,
		    SF_RETRY_UA | SF_NO_PRINT);
		if (error == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/* If we tried long wait and still failed, remember that. */
	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
	}

	/*
	 * After recovery action(s) completed, return to the original CCB.
	 * If the recovery CCB has failed, considering its own possible
	 * retries and recovery, assume we are back in state where we have
	 * been originally, but without recovery hopes left.  In such case,
	 * after the final attempt below, we cancel any further retries,
	 * blocking by that also any new recovery attempts for this CCB,
	 * and the result will be the final one returned to the CCB owner.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	if (error != 0)
		done_ccb->ccb_h.retry_count = 0;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
    struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
    struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
		    RELSIM_RELEASE_AFTER_TIMEOUT,
		    /*reduction*/0,
		    /*timeout*/duration_ms,
		    /*getcount_only*/0);
	}

}
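
/*
 * Example (worked, illustrative): with a bus_settle of 2000ms and a reset
 * that happened 1.5s ago, delta (1.5s) is less than the 2s duration, so
 * the devq is frozen and then released after the remaining 500ms; if the
 * reset were older than 2s, nothing would be frozen at all.
 */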

static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
		    camflags,
		    sense_flags,
		    openings,
		    relsim_flags,
		    timeout,
		    action,
		    action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		periph = xpt_path_periph(ccb->ccb_h.path);
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			*action_string = "Periph was invalidated";
		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    ccb->ccb_h.retry_count > 0) {
			if ((sense_flags & SF_RETRY_BUSY) == 0)
				ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
			*action_string = "Retries exhausted";
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}
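
/*
 * Example (worked, illustrative): if QUEUE FULL arrives while dev_active
 * is 30 and dev_openings is 2, total_openings is 32 and the new tag count
 * becomes 30 (bounded below by mintags); since 30 < 32,
 * RELSIM_ADJUST_OPENINGS is set and the command is requeued via ERESTART
 * without consuming a retry.
 */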
1629
1630 static int
1631 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1632 cam_flags camflags, u_int32_t sense_flags,
1633 int *openings, u_int32_t *relsim_flags,
1634 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1635 {
1636 struct cam_periph *periph;
1637 union ccb *orig_ccb = ccb;
1638 int error, recoveryccb;
1639
1640 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1641 if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
1642 biotrack(ccb->csio.bio, __func__);
1643 #endif
1644
1645 periph = xpt_path_periph(ccb->ccb_h.path);
1646 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1647 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1648 /*
1649 * If error recovery is already in progress, don't attempt
1650 * to process this error, but requeue it unconditionally
1651 * and attempt to process it once error recovery has
1652 * completed. This failed command is probably related to
1653 * the error that caused the currently active error recovery
1654 * action so our current recovery efforts should also
1655 * address this command. Be aware that the error recovery
1656 * code assumes that only one recovery action is in progress
1657 * on a particular peripheral instance at any given time
1658 * (e.g. only one saved CCB for error recovery) so it is
1659 * imperitive that we don't violate this assumption.
1660 */
1661 error = ERESTART;
1662 *action &= ~SSQ_PRINT_SENSE;
1663 } else {
1664 scsi_sense_action err_action;
1665 struct ccb_getdev cgd;
1666
1667 /*
1668 * Grab the inquiry data for this device.
1669 */
1670 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1671 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1672 xpt_action((union ccb *)&cgd);
1673
1674 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1675 sense_flags);
1676 error = err_action & SS_ERRMASK;
1677
1678 /*
1679 * Do not autostart sequential access devices
1680 * to avoid unexpected tape loading.
1681 */
1682 if ((err_action & SS_MASK) == SS_START &&
1683 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1684 *action_string = "Will not autostart a "
1685 "sequential access device";
1686 goto sense_error_done;
1687 }
1688
1689 /*
1690 * Avoid recovery recursion if recovery action is the same.
1691 */
1692 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1693 if (((err_action & SS_MASK) == SS_START &&
1694 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1695 ((err_action & SS_MASK) == SS_TUR &&
1696 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1697 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1698 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1699 *timeout = 500;
1700 }
1701 }
1702
1703 /*
1704 * If the recovery action will consume a retry,
1705 * make sure we actually have retries available.
1706 */
1707 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1708 if (ccb->ccb_h.retry_count > 0 &&
1709 (periph->flags & CAM_PERIPH_INVALID) == 0)
1710 ccb->ccb_h.retry_count--;
1711 else {
1712 *action_string = "Retries exhausted";
1713 goto sense_error_done;
1714 }
1715 }
1716
1717 if ((err_action & SS_MASK) >= SS_START) {
1718 /*
1719 * Do common portions of commands that
1720 * use recovery CCBs.
1721 */
1722 orig_ccb = xpt_alloc_ccb_nowait();
1723 if (orig_ccb == NULL) {
1724 *action_string = "Can't allocate recovery CCB";
1725 goto sense_error_done;
1726 }
1727 /*
1728 * Clear freeze flag for original request here, as
1729 * this freeze will be dropped as part of ERESTART.
1730 */
1731 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1732 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1733 }
1734
1735 switch (err_action & SS_MASK) {
1736 case SS_NOP:
1737 *action_string = "No recovery action needed";
1738 error = 0;
1739 break;
1740 case SS_RETRY:
1741 *action_string = "Retrying command (per sense data)";
1742 error = ERESTART;
1743 break;
1744 case SS_FAIL:
1745 *action_string = "Unretryable error";
1746 break;
1747 case SS_START:
1748 {
1749 int le;
1750
1751 /*
1752 * Send a start unit command to the device, and
1753 * then retry the command.
1754 */
1755 *action_string = "Attempting to start unit";
1756 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1757
1758 /*
1759 * Check for removable media and set
1760 * load/eject flag appropriately.
1761 */
1762 if (SID_IS_REMOVABLE(&cgd.inq_data))
1763 le = TRUE;
1764 else
1765 le = FALSE;
1766
1767 scsi_start_stop(&ccb->csio,
1768 /*retries*/1,
1769 camperiphdone,
1770 MSG_SIMPLE_Q_TAG,
1771 /*start*/TRUE,
1772 /*load/eject*/le,
1773 /*immediate*/FALSE,
1774 SSD_FULL_SIZE,
1775 /*timeout*/50000);
1776 break;
1777 }
1778 case SS_TUR:
1779 {
1780 /*
1781 * Send a Test Unit Ready to the device.
1782 * If the 'many' flag is set, we send 120
1783 * test unit ready commands, one every half
1784 * second. Otherwise, we just send one TUR.
1785 * We only want to do this if the retry
1786 * count has not been exhausted.
1787 */
1788 int retries;
1789
1790 if ((err_action & SSQ_MANY) != 0 && (periph->flags &
1791 CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
1792 periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
1793 *action_string = "Polling device for readiness";
1794 retries = 120;
1795 } else {
1796 *action_string = "Testing device for readiness";
1797 retries = 1;
1798 }
1799 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1800 scsi_test_unit_ready(&ccb->csio,
1801 retries,
1802 camperiphdone,
1803 MSG_SIMPLE_Q_TAG,
1804 SSD_FULL_SIZE,
1805 /*timeout*/5000);
1806
1807 /*
1808 * Accomplish our 500ms delay by deferring
1809 * the release of our device queue appropriately.
1810 */
1811 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1812 *timeout = 500;
1813 break;
1814 }
1815 default:
1816 panic("Unhandled error action %x", err_action);
1817 }
1818
1819 if ((err_action & SS_MASK) >= SS_START) {
1820 /*
1821 * Drop the priority, so that the recovery
1822 * CCB is the first to execute. Freeze the queue
1823 * after this command is sent so that we can
1824 * restore the old csio and have it queued in
1825 * the proper order before we release normal
1826 * transactions to the device.
1827 */
1828 ccb->ccb_h.pinfo.priority--;
1829 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1830 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1831 error = ERESTART;
1832 *orig = orig_ccb;
1833 }
1834
1835 sense_error_done:
1836 *action = err_action;
1837 }
1838 return (error);
1839 }
1840
1841 /*
1842 * Generic error handler. Peripheral drivers usually filter
1843 * out the errors that they handle in a unique manner, then
1844 * call this function.
1845 */
1846 int
1847 cam_periph_error(union ccb *ccb, cam_flags camflags,
1848 u_int32_t sense_flags)
1849 {
1850 struct cam_path *newpath;
1851 union ccb *orig_ccb, *scan_ccb;
1852 struct cam_periph *periph;
1853 const char *action_string;
1854 cam_status status;
1855 int frozen, error, openings, devctl_err;
1856 u_int32_t action, relsim_flags, timeout;
1857
1858 action = SSQ_PRINT_SENSE;
1859 periph = xpt_path_periph(ccb->ccb_h.path);
1860 action_string = NULL;
1861 status = ccb->ccb_h.status;
1862 frozen = (status & CAM_DEV_QFRZN) != 0;
1863 status &= CAM_STATUS_MASK;
1864 devctl_err = openings = relsim_flags = timeout = 0;
1865 orig_ccb = ccb;
1866
1867 /* Filter the errors that should be reported via devctl */
1868 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1869 case CAM_CMD_TIMEOUT:
1870 case CAM_REQ_ABORTED:
1871 case CAM_REQ_CMP_ERR:
1872 case CAM_REQ_TERMIO:
1873 case CAM_UNREC_HBA_ERROR:
1874 case CAM_DATA_RUN_ERR:
1875 case CAM_SCSI_STATUS_ERROR:
1876 case CAM_ATA_STATUS_ERROR:
1877 case CAM_SMP_STATUS_ERROR:
1878 devctl_err++;
1879 break;
1880 default:
1881 break;
1882 }
1883
1884 switch (status) {
1885 case CAM_REQ_CMP:
1886 error = 0;
1887 action &= ~SSQ_PRINT_SENSE;
1888 break;
1889 case CAM_SCSI_STATUS_ERROR:
1890 error = camperiphscsistatuserror(ccb, &orig_ccb,
1891 camflags, sense_flags, &openings, &relsim_flags,
1892 &timeout, &action, &action_string);
1893 break;
1894 case CAM_AUTOSENSE_FAIL:
1895 error = EIO; /* we have to kill the command */
1896 break;
1897 case CAM_UA_ABORT:
1898 case CAM_UA_TERMIO:
1899 case CAM_MSG_REJECT_REC:
1900 /* XXX Don't know that these are correct */
1901 error = EIO;
1902 break;
1903 case CAM_SEL_TIMEOUT:
1904 if ((camflags & CAM_RETRY_SELTO) != 0) {
1905 if (ccb->ccb_h.retry_count > 0 &&
1906 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1907 ccb->ccb_h.retry_count--;
1908 error = ERESTART;
1909
1910 /*
1911 * Wait a bit to give the device
1912 * time to recover before we try again.
1913 */
1914 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1915 timeout = periph_selto_delay;
1916 break;
1917 }
1918 action_string = "Retries exhausted";
1919 }
1920 /* FALLTHROUGH */
1921 case CAM_DEV_NOT_THERE:
1922 error = ENXIO;
1923 action = SSQ_LOST;
1924 break;
1925 case CAM_REQ_INVALID:
1926 case CAM_PATH_INVALID:
1927 case CAM_NO_HBA:
1928 case CAM_PROVIDE_FAIL:
1929 case CAM_REQ_TOO_BIG:
1930 case CAM_LUN_INVALID:
1931 case CAM_TID_INVALID:
1932 case CAM_FUNC_NOTAVAIL:
1933 error = EINVAL;
1934 break;
1935 case CAM_SCSI_BUS_RESET:
1936 case CAM_BDR_SENT:
1937 /*
1938 * Commands that repeatedly time out and cause these
1939 * kinds of error recovery actions should return
1940 * CAM_CMD_TIMEOUT, which allows us to safely assume
1941 * that this command was an innocent bystander to
1942 * these events and should be unconditionally
1943 * retried.
1944 */
1945 case CAM_REQUEUE_REQ:
1946 /* Unconditional requeue if device is still there */
1947 if (periph->flags & CAM_PERIPH_INVALID) {
1948 action_string = "Periph was invalidated";
1949 error = EIO;
1950 } else if (sense_flags & SF_NO_RETRY) {
1951 error = EIO;
1952 action_string = "Retry was blocked";
1953 } else {
1954 error = ERESTART;
1955 action &= ~SSQ_PRINT_SENSE;
1956 }
1957 break;
1958 case CAM_RESRC_UNAVAIL:
1959 /* Wait a bit for the resource shortage to abate. */
1960 timeout = periph_noresrc_delay;
1961 /* FALLTHROUGH */
1962 case CAM_BUSY:
1963 if (timeout == 0) {
1964 /* Wait a bit for the busy condition to abate. */
1965 timeout = periph_busy_delay;
1966 }
1967 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1968 /* FALLTHROUGH */
1969 case CAM_ATA_STATUS_ERROR:
1970 case CAM_REQ_CMP_ERR:
1971 case CAM_CMD_TIMEOUT:
1972 case CAM_UNEXP_BUSFREE:
1973 case CAM_UNCOR_PARITY:
1974 case CAM_DATA_RUN_ERR:
1975 default:
1976 if (periph->flags & CAM_PERIPH_INVALID) {
1977 error = EIO;
1978 action_string = "Periph was invalidated";
1979 } else if (ccb->ccb_h.retry_count == 0) {
1980 error = EIO;
1981 action_string = "Retries exhausted";
1982 } else if (sense_flags & SF_NO_RETRY) {
1983 error = EIO;
1984 action_string = "Retry was blocked";
1985 } else {
1986 ccb->ccb_h.retry_count--;
1987 error = ERESTART;
1988 }
1989 break;
1990 }
1991
1992 if ((sense_flags & SF_PRINT_ALWAYS) ||
1993 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1994 action |= SSQ_PRINT_SENSE;
1995 else if (sense_flags & SF_NO_PRINT)
1996 action &= ~SSQ_PRINT_SENSE;
1997 if ((action & SSQ_PRINT_SENSE) != 0)
1998 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1999 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
2000 if (error != ERESTART) {
2001 if (action_string == NULL)
2002 action_string = "Unretryable error";
2003 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
2004 error, action_string);
2005 } else if (action_string != NULL)
2006 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
2007 else {
2008 xpt_print(ccb->ccb_h.path,
2009 "Retrying command, %d more tries remain\n",
2010 ccb->ccb_h.retry_count);
2011 }
2012 }
2013
2014 if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
2015 cam_periph_devctl_notify(orig_ccb);
2016
2017 if ((action & SSQ_LOST) != 0) {
2018 lun_id_t lun_id;
2019
2020 /*
2021 * For a selection timeout, we consider all of the LUNs on
2022 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
2023 * then we only get rid of the device(s) specified by the
2024 * path in the original CCB.
2025 */
2026 if (status == CAM_SEL_TIMEOUT)
2027 lun_id = CAM_LUN_WILDCARD;
2028 else
2029 lun_id = xpt_path_lun_id(ccb->ccb_h.path);
2030
2031 /* Should we do more if we can't create the path?? */
2032 if (xpt_create_path(&newpath, periph,
2033 xpt_path_path_id(ccb->ccb_h.path),
2034 xpt_path_target_id(ccb->ccb_h.path),
2035 lun_id) == CAM_REQ_CMP) {
2036
2037 /*
2038 * Let peripheral drivers know that this
2039 * device has gone away.
2040 */
2041 xpt_async(AC_LOST_DEVICE, newpath, NULL);
2042 xpt_free_path(newpath);
2043 }
2044 }
2045
2046 /* Broadcast UNIT ATTENTIONs to all periphs. */
2047 if ((action & SSQ_UA) != 0)
2048 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
2049
2050 /* Rescan target on "Reported LUNs data has changed" */
2051 if ((action & SSQ_RESCAN) != 0) {
2052 if (xpt_create_path(&newpath, NULL,
2053 xpt_path_path_id(ccb->ccb_h.path),
2054 xpt_path_target_id(ccb->ccb_h.path),
2055 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2056
2057 scan_ccb = xpt_alloc_ccb_nowait();
2058 if (scan_ccb != NULL) {
2059 scan_ccb->ccb_h.path = newpath;
2060 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
2061 scan_ccb->crcn.flags = 0;
2062 xpt_rescan(scan_ccb);
2063 } else {
2064 xpt_print(newpath,
2065 "Can't allocate CCB to rescan target\n");
2066 xpt_free_path(newpath);
2067 }
2068 }
2069 }
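/*
 * Editor's note: on success, xpt_rescan() takes ownership of the scan
 * CCB and the path attached to it, which is why newpath above is only
 * freed explicitly when the CCB allocation fails.
 */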
2070
2071 /* Attempt a retry */
2072 if (error == ERESTART || error == 0) {
2073 if (frozen != 0)
2074 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2075 if (error == ERESTART)
2076 xpt_action(ccb);
2077 if (frozen != 0)
2078 cam_release_devq(ccb->ccb_h.path,
2079 relsim_flags,
2080 openings,
2081 timeout,
2082 /*getcount_only*/0);
2083 }
2084
2085 return (error);
2086 }
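/*
 * Editor's sketch (not part of the original file): a typical periph
 * completion routine funnels errors through cam_periph_error() along
 * the lines below.  The "xx" names are hypothetical and the flag
 * choices merely mirror common da(4)/cd(4) usage.
 */
#ifdef CAM_PERIPH_ERROR_EXAMPLE
static void
xxdone(struct cam_periph *periph, union ccb *done_ccb)
{
	int error;

	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		error = cam_periph_error(done_ccb,
		    /*camflags*/CAM_RETRY_SELTO, /*sense_flags*/SF_RETRY_UA);
		if (error == ERESTART) {
			/* cam_periph_error() re-queued the CCB for us. */
			return;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Unfreeze the device queue on a final error. */
			cam_release_devq(done_ccb->ccb_h.path,
			    /*relsim_flags*/0, /*openings*/0,
			    /*timeout*/0, /*getcount_only*/0);
		}
	}
	xpt_release_ccb(done_ccb);
}
#endif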
2087
2088 #define CAM_PERIPH_DEVD_MSG_SIZE 256
2089
2090 static void
2091 cam_periph_devctl_notify(union ccb *ccb)
2092 {
2093 struct cam_periph *periph;
2094 struct ccb_getdev *cgd;
2095 struct sbuf sb;
2096 int serr, sk, asc, ascq;
2097 char *sbmsg, *type;
2098
2099 sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
2100 if (sbmsg == NULL)
2101 return;
2102
2103 sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
2104
2105 periph = xpt_path_periph(ccb->ccb_h.path);
2106 sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
2107 periph->unit_number);
2108
2109 sbuf_printf(&sb, "serial=\"");
2110 if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
2111 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
2112 CAM_PRIORITY_NORMAL);
2113 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2114 xpt_action((union ccb *)cgd);
2115
2116 if (cgd->ccb_h.status == CAM_REQ_CMP)
2117 sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
2118 xpt_free_ccb((union ccb *)cgd);
2119 }
2120 sbuf_printf(&sb, "\" ");
2121 sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
2122
2123 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2124 case CAM_CMD_TIMEOUT:
2125 sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
2126 type = "timeout";
2127 break;
2128 case CAM_SCSI_STATUS_ERROR:
2129 sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
2130 if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
2131 sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
2132 serr, sk, asc, ascq);
2133 type = "error";
2134 break;
2135 case CAM_ATA_STATUS_ERROR:
2136 sbuf_printf(&sb, "RES=\"");
2137 ata_res_sbuf(&ccb->ataio.res, &sb);
2138 sbuf_printf(&sb, "\" ");
2139 type = "error";
2140 break;
2141 default:
2142 type = "error";
2143 break;
2144 }
2145
2146 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2147 sbuf_printf(&sb, "CDB=\"");
2148 scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
2149 sbuf_printf(&sb, "\" ");
2150 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
2151 sbuf_printf(&sb, "ACB=\"");
2152 ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
2153 sbuf_printf(&sb, "\" ");
2154 }
2155
2156 if (sbuf_finish(&sb) == 0)
2157 devctl_notify("CAM", "periph", type, sbuf_data(&sb));
2158 sbuf_delete(&sb);
2159 free(sbmsg, M_CAMPERIPH);
2160 }
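/*
 * Editor's note: the sbuf assembled above is handed to devd(8) via
 * devctl_notify(), so a command timeout on a da(4) device would show
 * up in userland roughly as (all values illustrative):
 *
 *   !system=CAM subsystem=periph type=timeout device=da0
 *       serial="XYZ123" cam_status="0x4b" timeout=30000
 *       CDB="28 00 00 10 00 00 00 00 08 00"
 *
 * where 0x4b is CAM_CMD_TIMEOUT with CAM_DEV_QFRZN set.
 */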
2161
2162 /*
2163 * Sysctl handler to force an immediate invalidation of the drive.
2164 * Safe to mark CTLFLAG_MPSAFE since we take the periph lock here.
2165 */
2166 int
2167 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
2168 {
2169 struct cam_periph *periph;
2170 int error, value;
2171
2172 periph = arg1;
2173 value = 0;
2174 error = sysctl_handle_int(oidp, &value, 0, req);
2175 if (error != 0 || req->newptr == NULL || value != 1)
2176 return (error);
2177
2178 cam_periph_lock(periph);
2179 cam_periph_invalidate(periph);
2180 cam_periph_unlock(periph);
2181
2182 return (0);
2183 }
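/*
 * Editor's sketch: a periph driver would typically attach this handler
 * to its per-instance sysctl tree, roughly as below (the softc field
 * names are hypothetical):
 *
 *	SYSCTL_ADD_PROC(&softc->sysctl_ctx,
 *	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate",
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0,
 *	    cam_periph_invalidate_sysctl, "I", "Write 1 to invalidate");
 *
 * after which "sysctl kern.cam.<periph>.<unit>.invalidate=1" forces
 * the device to be invalidated from userland.
 */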