1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* Driver for VirtIO SCSI devices. */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/sglist.h>
41 #include <sys/sysctl.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/callout.h>
45 #include <sys/queue.h>
46 #include <sys/sbuf.h>
47
48 #include <machine/stdarg.h>
49
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
54
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_debug.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63
64 #include <dev/virtio/virtio.h>
65 #include <dev/virtio/virtqueue.h>
66 #include <dev/virtio/scsi/virtio_scsi.h>
67 #include <dev/virtio/scsi/virtio_scsivar.h>
68
69 #include "virtio_if.h"
70
71 static int vtscsi_modevent(module_t, int, void *);
72
73 static int vtscsi_probe(device_t);
74 static int vtscsi_attach(device_t);
75 static int vtscsi_detach(device_t);
76 static int vtscsi_suspend(device_t);
77 static int vtscsi_resume(device_t);
78
79 static int vtscsi_negotiate_features(struct vtscsi_softc *);
80 static int vtscsi_setup_features(struct vtscsi_softc *);
81 static void vtscsi_read_config(struct vtscsi_softc *,
82 struct virtio_scsi_config *);
83 static int vtscsi_maximum_segments(struct vtscsi_softc *, int);
84 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *);
85 static void vtscsi_check_sizes(struct vtscsi_softc *);
86 static void vtscsi_write_device_config(struct vtscsi_softc *);
87 static int vtscsi_reinit(struct vtscsi_softc *);
88
89 static int vtscsi_alloc_cam(struct vtscsi_softc *);
90 static int vtscsi_register_cam(struct vtscsi_softc *);
91 static void vtscsi_free_cam(struct vtscsi_softc *);
92 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
93 static int vtscsi_register_async(struct vtscsi_softc *);
94 static void vtscsi_deregister_async(struct vtscsi_softc *);
95 static void vtscsi_cam_action(struct cam_sim *, union ccb *);
96 static void vtscsi_cam_poll(struct cam_sim *);
97
98 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
99 union ccb *);
100 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
101 union ccb *);
102 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
103 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
104 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
105 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *,
106 struct cam_sim *, union ccb *);
107
108 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
109 struct sglist *, struct ccb_scsiio *);
110 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
111 struct vtscsi_request *, int *, int *);
112 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
113 struct vtscsi_request *);
114 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
115 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
116 struct vtscsi_request *);
117 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
118 struct vtscsi_request *);
119 static void vtscsi_timedout_scsi_cmd(void *);
120 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
121 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
122 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
123 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
124 struct vtscsi_request *);
125
126 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *,
127 struct vtscsi_request *);
128 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *,
129 struct vtscsi_request *, struct sglist *, int, int, int);
130 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
131 struct vtscsi_request *);
132 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
133 struct vtscsi_request *);
134 static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
135 struct vtscsi_request *);
136
137 static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
138 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
139 static void vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
140 struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
141 static void vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
142 uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
143
144 static void vtscsi_freeze_simq(struct vtscsi_softc *, int);
145 static int vtscsi_thaw_simq(struct vtscsi_softc *, int);
146
147 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
148 lun_id_t);
149 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
150 lun_id_t);
151 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *);
152
153 static void vtscsi_handle_event(struct vtscsi_softc *,
154 struct virtio_scsi_event *);
155 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *,
156 struct virtio_scsi_event *);
157 static int vtscsi_init_event_vq(struct vtscsi_softc *);
158 static void vtscsi_reinit_event_vq(struct vtscsi_softc *);
159 static void vtscsi_drain_event_vq(struct vtscsi_softc *);
160
161 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *);
162 static void vtscsi_complete_vqs(struct vtscsi_softc *);
163 static void vtscsi_drain_vqs(struct vtscsi_softc *);
164 static void vtscsi_cancel_request(struct vtscsi_softc *,
165 struct vtscsi_request *);
166 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
167 static void vtscsi_stop(struct vtscsi_softc *);
168 static int vtscsi_reset_bus(struct vtscsi_softc *);
169
170 static void vtscsi_init_request(struct vtscsi_softc *,
171 struct vtscsi_request *);
172 static int vtscsi_alloc_requests(struct vtscsi_softc *);
173 static void vtscsi_free_requests(struct vtscsi_softc *);
174 static void vtscsi_enqueue_request(struct vtscsi_softc *,
175 struct vtscsi_request *);
176 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
177
178 static void vtscsi_complete_request(struct vtscsi_request *);
179 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
180
181 static void vtscsi_control_vq_intr(void *);
182 static void vtscsi_event_vq_intr(void *);
183 static void vtscsi_request_vq_intr(void *);
184 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *);
185 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *);
186
187 static void vtscsi_get_tunables(struct vtscsi_softc *);
188 static void vtscsi_setup_sysctl(struct vtscsi_softc *);
189
190 static void vtscsi_printf_req(struct vtscsi_request *, const char *,
191 const char *, ...);
192
/*
 * Endianness conversion helpers.  A modern (VIRTIO_F_VERSION_1) device
 * uses little-endian config/descriptor fields while a legacy device uses
 * guest-native byte order; these wrappers pick the correct conversion
 * based on the features negotiated into sc->vtscsi_features.
 */
#define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
#define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
#define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
#define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
#define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)

/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/* Human-readable names for the negotiated feature bits (for dmesg/sysctl). */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
	{ VIRTIO_SCSI_F_T10_PI,		"T10PI"		},

	{ 0, NULL }
};
223
/* newbus device method table. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};

/* Register with the VirtIO bus and declare module dependencies. */
VIRTIO_DRIVER_MODULE(virtio_scsi, vtscsi_driver, vtscsi_modevent, NULL);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
247
248 static int
249 vtscsi_modevent(module_t mod, int type, void *unused)
250 {
251 int error;
252
253 switch (type) {
254 case MOD_LOAD:
255 case MOD_QUIESCE:
256 case MOD_UNLOAD:
257 case MOD_SHUTDOWN:
258 error = 0;
259 break;
260 default:
261 error = EOPNOTSUPP;
262 break;
263 }
264
265 return (error);
266 }
267
/* Match any VirtIO device advertising the SCSI device ID. */
static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
273
/*
 * Attach the device: negotiate features, size the driver state from the
 * device configuration, allocate the virtqueues/request pool/CAM
 * structures, enable interrupts, and finally register with CAM.  On any
 * failure, vtscsi_detach() tears down whatever was initialized so far.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;
	virtio_set_feature_desc(dev, vtscsi_feature_desc);

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_setup_sysctl(sc);

	error = vtscsi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	/* Cache the bus/target/LUN limits reported by the device. */
	vtscsi_read_config(sc, &scsicfg);

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	/* Tell the device the sense/CDB sizes this driver will use. */
	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* Unified error path: detach cleans up partial initialization. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
364
/*
 * Detach the device.  Marks the softc as detaching (so new CCBs are
 * rejected by vtscsi_cam_action()), stops the device, drains the
 * virtqueues, and releases the CAM/request/sglist resources.  Also used
 * as the error-unwind path of vtscsi_attach(), so every free is guarded
 * against state that was never allocated.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Complete anything already finished, then cancel the remainder. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
393
/* Suspend is a no-op for this driver. */
static int
vtscsi_suspend(device_t dev)
{

	return (0);
}
400
/* Resume is a no-op for this driver. */
static int
vtscsi_resume(device_t dev)
{

	return (0);
}
407
/*
 * Negotiate the driver's supported feature set (VTSCSI_FEATURES) with
 * the device, record the result in the softc, and finalize it with the
 * host.  Returns 0 on success or an errno from virtio_finalize_features().
 */
static int
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtscsi_dev;
	features = VTSCSI_FEATURES;

	sc->vtscsi_features = virtio_negotiate_features(dev, features);
	return (virtio_finalize_features(dev));
}
420
/*
 * Negotiate features and translate the negotiated bits into the driver's
 * internal VTSCSI_FLAG_* state used by the data path.
 */
static int
vtscsi_setup_features(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = vtscsi_negotiate_features(sc);
	if (error)
		return (error);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	return (0);
}
442
/* Read one field of the device config space into the matching
 * field of a struct virtio_scsi_config. */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/*
 * Populate *scsicfg with the device's configuration, one field at a
 * time.  Fields not read remain zero from the bzero().
 */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
471
472 static int
473 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
474 {
475 int nsegs;
476
477 nsegs = VTSCSI_MIN_SEGMENTS;
478
479 if (seg_max > 0) {
480 nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
481 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
482 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
483 } else
484 nsegs += 1;
485
486 return (nsegs);
487 }
488
/*
 * Allocate the three virtqueues used by a VirtIO SCSI device: control
 * (task management), event, and request (SCSI commands).  Only the
 * request queue needs the full per-command segment count.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
511
/*
 * Without indirect descriptors, a request consumes one ring slot per
 * segment, so the segment count must never exceed the ring size.  Clamp
 * it here in case the hypervisor reported a bogus seg_max.
 */
static void
vtscsi_check_sizes(struct vtscsi_softc *sc)
{
	int rqsize;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
		/*
		 * Ensure the assertions in virtqueue_enqueue(),
		 * even if the hypervisor reports a bad seg_max.
		 */
		rqsize = virtqueue_size(sc->vtscsi_request_vq);
		if (sc->vtscsi_max_nsegs > rqsize) {
			device_printf(sc->vtscsi_dev,
			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
			    rqsize);
			sc->vtscsi_max_nsegs = rqsize;
		}
	}
}
531
/*
 * Advertise to the device the sense buffer and CDB sizes this driver
 * uses, via the writable sense_size/cdb_size config fields.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
548
/*
 * Reinitialize the device after a reset (e.g. bus reset), re-applying
 * the negotiated features, the writable config fields, the event
 * virtqueue buffers, and the interrupt state.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		virtio_reinit_complete(dev);
		vtscsi_reinit_event_vq(sc);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
570
/*
 * Allocate the CAM SIM and its device queue.  The number of openings is
 * the request pool size minus the requests reserved for internal use
 * (e.g. task management), so CAM cannot exhaust the pool.
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	/* The SIM shares the softc mutex; cam_sim_alloc owns devq on success. */
	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
598
/*
 * Register the SIM with the CAM transport layer: register the bus,
 * create a wildcard path for async callbacks and rescans, and subscribe
 * to async events.  Performed under the softc lock; on failure the
 * partially-registered state is unwound in reverse order.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	/* Wildcard target/LUN path covering the whole bus. */
	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
649
/*
 * Undo vtscsi_register_cam()/vtscsi_alloc_cam(): deregister async
 * callbacks, free the path, deregister the bus, and free the SIM.  Safe
 * to call with partially-initialized CAM state (attach error path).
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		/* Second argument frees the SIM's devq as well. */
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
672
/*
 * CAM async event callback.  Currently only traces the event; the
 * device found/lost cases are placeholders.
 */
static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct vtscsi_softc *sc;

	sim = cb_arg;
	sc = cam_sim_softc(sim);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

	/*
	 * TODO Once QEMU supports event reporting, we should
	 * (un)subscribe to events here.
	 */
	switch (code) {
	case AC_FOUND_DEVICE:
		break;
	case AC_LOST_DEVICE:
		break;
	}
}
695
/*
 * Subscribe to CAM device found/lost async events on the wildcard
 * path.  Returns the resulting CCB status (CAM_REQ_CMP on success).
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
712
/*
 * Unsubscribe from async events by re-registering the callback with an
 * empty event mask.
 */
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	memset(&csa, 0, sizeof(csa));
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}
727
/*
 * SIM action entry point: dispatch an incoming CCB by function code.
 * Called with the softc (SIM) lock held.  While detaching, all CCBs are
 * failed immediately with CAM_NO_HBA.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; nothing to set. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
794
/*
 * SIM poll entry point (used when interrupts are unavailable, e.g.
 * during a kernel dump): process any completed virtqueue entries.
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{
	struct vtscsi_softc *sc;

	sc = cam_sim_softc(sim);

	vtscsi_complete_vqs_locked(sc);
}
804
/*
 * Handle an XPT_SCSI_IO CCB.  Validates that the CDB fits the VirtIO
 * request structure and that bidirectional transfers were negotiated,
 * then hands the CCB to vtscsi_start_scsi_cmd().  On error the CCB is
 * completed here; on success completion happens from the request vq.
 */
static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	/* CAM_DIR_BOTH requires the negotiated InOut feature. */
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}
838
/*
 * Report fixed transport settings: SPC-3 SCSI over a nominal SAS
 * transport, with tagged queueing enabled.
 */
static void
vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_trans_settings *cts;
	struct ccb_trans_settings_scsi *scsi;

	cts = &ccb->cts;
	scsi = &cts->proto_specific.scsi;

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC3;
	cts->transport = XPORT_SAS;
	cts->transport_version = 0;

	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
859
/*
 * Handle XPT_RESET_BUS by resetting the whole device via
 * vtscsi_reset_bus() and completing the CCB with the outcome.
 */
static void
vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
{
	int error;

	error = vtscsi_reset_bus(sc);
	if (error == 0)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
	    error, ccb, ccb->ccb_h.status);

	xpt_done(ccb);
}
876
/*
 * Handle XPT_RESET_DEV: issue a LUN reset task-management command on
 * the control virtqueue.  On success the CCB completes asynchronously;
 * otherwise the request is returned to the pool and the CCB is failed
 * here (freezing the simq if the pool was exhausted).
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* Command was not submitted; recycle the request. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
912
/*
 * Handle XPT_ABORT: issue an abort-task task-management command on the
 * control virtqueue.  Mirrors vtscsi_cam_reset_dev(): asynchronous
 * completion on success, immediate CCB failure (and possible simq
 * freeze) otherwise.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* Command was not submitted; recycle the request. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
948
/*
 * Handle XPT_PATH_INQ: describe the HBA's capabilities and limits to
 * CAM (tagged queueing, sequential scan, unmapped I/O, target/LUN
 * limits from the device config, and the maximum I/O size derived from
 * the per-request segment budget).
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	/* Place the initiator just past the last valid target ID. */
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/*
	 * One page per data segment; the header segments (and one spare)
	 * are excluded from the budget.
	 */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
998
/*
 * Append the CCB's data buffer(s) to the scatter-gather list, handling
 * each of the CAM data-layout flavors: single virtual or physical
 * buffer, virtual or physical S/G list, or a struct bio.  Returns 0 or
 * an errno from the sglist routines (EINVAL for an unknown layout).
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1042
/*
 * Build the scatter-gather list for a SCSI command: the device-readable
 * part (request header, then write data for CAM_DIR_OUT) followed by
 * the device-writable part (response structure, then read data for
 * CAM_DIR_IN).  On return *readable/*writable hold the segment counts
 * for virtqueue_enqueue().  Returns EFBIG if the data does not fit the
 * segment budget, which should only happen if maxio was set wrong.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1098
/*
 * Submit a SCSI command to the request virtqueue: build the request
 * header and S/G list, enqueue, notify the device, and arm the timeout
 * callout if the CCB has one.  On an enqueue failure the CCB is marked
 * for requeue and the simq is frozen until the vq drains.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Sentinel; the device overwrites this with the real response. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* ccbh->timeout is in milliseconds. */
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1152
1153 static int
1154 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1155 {
1156 struct vtscsi_request *req;
1157 int error;
1158
1159 req = vtscsi_dequeue_request(sc);
1160 if (req == NULL) {
1161 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1162 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1163 return (ENOBUFS);
1164 }
1165
1166 req->vsr_ccb = ccb;
1167
1168 error = vtscsi_execute_scsi_cmd(sc, req);
1169 if (error)
1170 vtscsi_enqueue_request(sc, req);
1171
1172 return (error);
1173 }
1174
/*
 * Completion handler for the ABORT_TASK TMF issued on behalf of a timed out
 * SCSI command.  If the abort did not take effect and nothing else will clean
 * up the timed out request, fall back to resetting the bus.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        struct vtscsi_request *to_req;
        uint8_t response;

        tmf_resp = &req->vsr_tmf_resp;
        response = tmf_resp->response;
        to_req = req->vsr_timedout_req;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
            req, to_req, response);

        /* The TMF request itself is done; recycle it before deciding. */
        vtscsi_enqueue_request(sc, req);

        /*
         * The timedout request could have completed between when the
         * abort task was sent and when the host processed it.
         */
        if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
                return;

        /* The timedout request was successfully aborted. */
        if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
                return;

        /* Don't bother if the device is going away. */
        if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
                return;

        /* The timedout request will be aborted by the reset. */
        if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
                return;

        vtscsi_reset_bus(sc);
}
1213
/*
 * Send an asynchronous ABORT_TASK TMF for a timed out SCSI command.
 * The outcome is handled in vtscsi_complete_abort_timedout_scsi_cmd().
 * Returns 0 when the TMF was queued; otherwise the caller must escalate
 * (typically to a bus reset).
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
        struct sglist *sg;
        struct ccb_hdr *to_ccbh;
        struct vtscsi_request *req;
        struct virtio_scsi_ctrl_tmf_req *tmf_req;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        int error;

        sg = sc->vtscsi_sglist;
        to_ccbh = &to_req->vsr_ccb->ccb_h;

        /* Need a second request to carry the TMF itself. */
        req = vtscsi_dequeue_request(sc);
        if (req == NULL) {
                error = ENOBUFS;
                goto fail;
        }

        tmf_req = &req->vsr_tmf_req;
        tmf_resp = &req->vsr_tmf_resp;

        /* The timed out command is identified by its CCB pointer tag. */
        vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
            (uintptr_t) to_ccbh, tmf_req);

        sglist_reset(sg);
        sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
        sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

        req->vsr_timedout_req = to_req;
        req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
        /* Poison the response so a stale value cannot look like success. */
        tmf_resp->response = -1;

        error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
            VTSCSI_EXECUTE_ASYNC);
        if (error == 0)
                return (0);

        vtscsi_enqueue_request(sc, req);

fail:
        vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
            "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

        return (error);
}
1261
/*
 * Callout handler for a SCSI command that has exceeded its CCB timeout.
 * Runs with the softc mutex held (the callout was initialized with
 * callout_init_mtx() in vtscsi_init_request()).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
        struct vtscsi_softc *sc;
        struct vtscsi_request *to_req;

        to_req = xreq;
        sc = to_req->vsr_softc;

        vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
            to_req, to_req->vsr_ccb, to_req->vsr_state);

        /* Don't bother if the device is going away. */
        if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
                return;

        /*
         * Bail if the request is not in use. We likely raced when
         * stopping the callout handler or it has already been aborted.
         */
        if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
            (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
                return;

        /*
         * Complete the request queue in case the timedout request is
         * actually just pending.
         */
        vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
        if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
                return;

        sc->vtscsi_stats.scsi_cmd_timeouts++;
        to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

        /* Try the gentle option first: abort just this task. */
        if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
                return;

        /* Could not even queue the abort; reset the whole bus. */
        vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
        vtscsi_reset_bus(sc);
}
1303
1304 static cam_status
1305 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1306 {
1307 cam_status status;
1308
1309 switch (cmd_resp->response) {
1310 case VIRTIO_SCSI_S_OK:
1311 status = CAM_REQ_CMP;
1312 break;
1313 case VIRTIO_SCSI_S_OVERRUN:
1314 status = CAM_DATA_RUN_ERR;
1315 break;
1316 case VIRTIO_SCSI_S_ABORTED:
1317 status = CAM_REQ_ABORTED;
1318 break;
1319 case VIRTIO_SCSI_S_BAD_TARGET:
1320 status = CAM_SEL_TIMEOUT;
1321 break;
1322 case VIRTIO_SCSI_S_RESET:
1323 status = CAM_SCSI_BUS_RESET;
1324 break;
1325 case VIRTIO_SCSI_S_BUSY:
1326 status = CAM_SCSI_BUSY;
1327 break;
1328 case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1329 case VIRTIO_SCSI_S_TARGET_FAILURE:
1330 case VIRTIO_SCSI_S_NEXUS_FAILURE:
1331 status = CAM_SCSI_IT_NEXUS_LOST;
1332 break;
1333 default: /* VIRTIO_SCSI_S_FAILURE */
1334 status = CAM_REQ_CMP_ERR;
1335 break;
1336 }
1337
1338 return (status);
1339 }
1340
/*
 * Fill in the CCB's SCSI status, residual, and autosense data from a
 * completed command response.  Returns the CAM status, with
 * CAM_AUTOSNS_VALID OR'd in when sense data was returned.
 */
static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
        uint32_t resp_sense_length;
        cam_status status;

        csio->scsi_status = cmd_resp->status;
        csio->resid = vtscsi_htog32(sc, cmd_resp->resid);

        if (csio->scsi_status == SCSI_STATUS_OK)
                status = CAM_REQ_CMP;
        else
                status = CAM_SCSI_STATUS_ERROR;

        resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);

        if (resp_sense_length > 0) {
                status |= CAM_AUTOSNS_VALID;

                if (resp_sense_length < csio->sense_len)
                        csio->sense_resid = csio->sense_len - resp_sense_length;
                else
                        csio->sense_resid = 0;

                /*
                 * sense_len - sense_resid is min(resp_sense_length,
                 * csio->sense_len), so the copy never overruns the
                 * CCB's sense buffer.
                 */
                memcpy(&csio->sense_data, cmd_resp->sense,
                    csio->sense_len - csio->sense_resid);
        }

        vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
            "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
            csio, csio->scsi_status, csio->resid, csio->sense_resid);

        return (status);
}
1376
/*
 * Completion handler for a SCSI command dequeued from the request
 * virtqueue: cancel any pending timeout, translate the response into a
 * CAM status, complete the CCB, and recycle the request.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
        struct ccb_hdr *ccbh;
        struct ccb_scsiio *csio;
        struct virtio_scsi_cmd_resp *cmd_resp;
        cam_status status;

        csio = &req->vsr_ccb->csio;
        ccbh = &csio->ccb_h;
        cmd_resp = &req->vsr_cmd_resp;

        KASSERT(ccbh->ccbh_vtscsi_req == req,
            ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

        if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
                callout_stop(&req->vsr_callout);

        status = vtscsi_scsi_cmd_cam_status(cmd_resp);
        if (status == CAM_REQ_ABORTED) {
                /* An abort we issued for a timeout reports as a timeout. */
                if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
                        status = CAM_CMD_TIMEOUT;
        } else if (status == CAM_REQ_CMP)
                status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

        if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
                /* CAM convention: freeze the device queue on any error. */
                status |= CAM_DEV_QFRZN;
                xpt_freeze_devq(ccbh->path, 1);
        }

        /* A request and a vq slot just freed up; thaw the SIMQ if frozen. */
        if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
                status |= CAM_RELEASE_SIMQ;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
            req, ccbh, status);

        ccbh->status = status;
        xpt_done(req->vsr_ccb);
        vtscsi_enqueue_request(sc, req);
}
1417
1418 static void
1419 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1420 {
1421
1422 /* XXX We probably shouldn't poll forever. */
1423 req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1424 do
1425 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1426 while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1427
1428 req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1429 }
1430
1431 static int
1432 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1433 struct sglist *sg, int readable, int writable, int flag)
1434 {
1435 struct virtqueue *vq;
1436 int error;
1437
1438 vq = sc->vtscsi_control_vq;
1439
1440 MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1441
1442 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1443 if (error) {
1444 /*
1445 * Return EAGAIN when the virtqueue does not have enough
1446 * descriptors available.
1447 */
1448 if (error == ENOSPC || error == EMSGSIZE)
1449 error = EAGAIN;
1450
1451 return (error);
1452 }
1453
1454 virtqueue_notify(vq);
1455 if (flag == VTSCSI_EXECUTE_POLL)
1456 vtscsi_poll_ctrl_req(sc, req);
1457
1458 return (0);
1459 }
1460
1461 static void
1462 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1463 struct vtscsi_request *req)
1464 {
1465 union ccb *ccb;
1466 struct ccb_hdr *ccbh;
1467 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1468
1469 ccb = req->vsr_ccb;
1470 ccbh = &ccb->ccb_h;
1471 tmf_resp = &req->vsr_tmf_resp;
1472
1473 switch (tmf_resp->response) {
1474 case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1475 ccbh->status = CAM_REQ_CMP;
1476 break;
1477 case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1478 ccbh->status = CAM_UA_ABORT;
1479 break;
1480 default:
1481 ccbh->status = CAM_REQ_CMP_ERR;
1482 break;
1483 }
1484
1485 xpt_done(ccb);
1486 vtscsi_enqueue_request(sc, req);
1487 }
1488
/*
 * Handle an XPT_ABORT CCB by issuing an ABORT_TASK TMF for the target
 * command.  Only commands that are currently in flight can be aborted;
 * anything else fails immediately with the CCB untouched.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
        struct sglist *sg;
        struct ccb_abort *cab;
        struct ccb_hdr *ccbh;
        struct ccb_hdr *abort_ccbh;
        struct vtscsi_request *abort_req;
        struct virtio_scsi_ctrl_tmf_req *tmf_req;
        struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
        int error;

        sg = sc->vtscsi_sglist;
        cab = &req->vsr_ccb->cab;
        ccbh = &cab->ccb_h;
        tmf_req = &req->vsr_tmf_req;
        tmf_resp = &req->vsr_tmf_resp;

        /* CCB header and request that's to be aborted. */
        abort_ccbh = &cab->abort_ccb->ccb_h;
        abort_req = abort_ccbh->ccbh_vtscsi_req;

        if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
                error = EINVAL;
                goto fail;
        }

        /* Only attempt to abort requests that could be in-flight. */
        if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
                error = EALREADY;
                goto fail;
        }

        /* Mark it aborted and stop its timeout before racing with it. */
        abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
        if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
                callout_stop(&abort_req->vsr_callout);

        /* The command to abort is identified by its CCB pointer tag. */
        vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
            (uintptr_t) abort_ccbh, tmf_req);

        sglist_reset(sg);
        sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
        sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

        req->vsr_complete = vtscsi_complete_abort_task_cmd;
        /* Poison the response so a stale value cannot look like success. */
        tmf_resp->response = -1;

        error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
            VTSCSI_EXECUTE_ASYNC);

fail:
        vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
            "abort_req=%p\n", error, req, abort_ccbh, abort_req);

        return (error);
}
1546
1547 static void
1548 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1549 struct vtscsi_request *req)
1550 {
1551 union ccb *ccb;
1552 struct ccb_hdr *ccbh;
1553 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1554
1555 ccb = req->vsr_ccb;
1556 ccbh = &ccb->ccb_h;
1557 tmf_resp = &req->vsr_tmf_resp;
1558
1559 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1560 req, ccb, tmf_resp->response);
1561
1562 if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1563 ccbh->status = CAM_REQ_CMP;
1564 vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1565 ccbh->target_lun);
1566 } else
1567 ccbh->status = CAM_REQ_CMP_ERR;
1568
1569 xpt_done(ccb);
1570 vtscsi_enqueue_request(sc, req);
1571 }
1572
1573 static int
1574 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1575 struct vtscsi_request *req)
1576 {
1577 struct sglist *sg;
1578 struct ccb_resetdev *crd;
1579 struct ccb_hdr *ccbh;
1580 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1581 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1582 uint32_t subtype;
1583 int error;
1584
1585 sg = sc->vtscsi_sglist;
1586 crd = &req->vsr_ccb->crd;
1587 ccbh = &crd->ccb_h;
1588 tmf_req = &req->vsr_tmf_req;
1589 tmf_resp = &req->vsr_tmf_resp;
1590
1591 if (ccbh->target_lun == CAM_LUN_WILDCARD)
1592 subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1593 else
1594 subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1595
1596 vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);
1597
1598 sglist_reset(sg);
1599 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1600 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1601
1602 req->vsr_complete = vtscsi_complete_reset_dev_cmd;
1603 tmf_resp->response = -1;
1604
1605 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1606 VTSCSI_EXECUTE_ASYNC);
1607
1608 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
1609 error, req, ccbh);
1610
1611 return (error);
1612 }
1613
1614 static void
1615 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1616 {
1617
1618 *target_id = lun[1];
1619 *lun_id = (lun[2] << 8) | lun[3];
1620 }
1621
1622 static void
1623 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1624 {
1625
1626 lun[0] = 1;
1627 lun[1] = ccbh->target_id;
1628 lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1629 lun[3] = ccbh->target_lun & 0xFF;
1630 }
1631
1632 static void
1633 vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
1634 struct virtio_scsi_cmd_req *cmd_req)
1635 {
1636 uint8_t attr;
1637
1638 switch (csio->tag_action) {
1639 case MSG_HEAD_OF_Q_TAG:
1640 attr = VIRTIO_SCSI_S_HEAD;
1641 break;
1642 case MSG_ORDERED_Q_TAG:
1643 attr = VIRTIO_SCSI_S_ORDERED;
1644 break;
1645 case MSG_ACA_TASK:
1646 attr = VIRTIO_SCSI_S_ACA;
1647 break;
1648 default: /* MSG_SIMPLE_Q_TAG */
1649 attr = VIRTIO_SCSI_S_SIMPLE;
1650 break;
1651 }
1652
1653 vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1654 cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
1655 cmd_req->task_attr = attr;
1656
1657 memcpy(cmd_req->cdb,
1658 csio->ccb_h.flags & CAM_CDB_POINTER ?
1659 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1660 csio->cdb_len);
1661 }
1662
1663 static void
1664 vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
1665 uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1666 {
1667
1668 vtscsi_set_request_lun(ccbh, tmf_req->lun);
1669
1670 tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
1671 tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
1672 tmf_req->tag = vtscsi_gtoh64(sc, tag);
1673 }
1674
1675 static void
1676 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1677 {
1678 int frozen;
1679
1680 frozen = sc->vtscsi_frozen;
1681
1682 if (reason & VTSCSI_REQUEST &&
1683 (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1684 sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1685
1686 if (reason & VTSCSI_REQUEST_VQ &&
1687 (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1688 sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1689
1690 /* Freeze the SIMQ if transitioned to frozen. */
1691 if (frozen == 0 && sc->vtscsi_frozen != 0) {
1692 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1693 xpt_freeze_simq(sc->vtscsi_sim, 1);
1694 }
1695 }
1696
1697 static int
1698 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1699 {
1700 int thawed;
1701
1702 if (sc->vtscsi_frozen == 0 || reason == 0)
1703 return (0);
1704
1705 if (reason & VTSCSI_REQUEST &&
1706 sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1707 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1708
1709 if (reason & VTSCSI_REQUEST_VQ &&
1710 sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1711 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1712
1713 thawed = sc->vtscsi_frozen == 0;
1714 if (thawed != 0)
1715 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1716
1717 return (thawed);
1718 }
1719
1720 static void
1721 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1722 target_id_t target_id, lun_id_t lun_id)
1723 {
1724 struct cam_path *path;
1725
1726 /* Use the wildcard path from our softc for bus announcements. */
1727 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1728 xpt_async(ac_code, sc->vtscsi_path, NULL);
1729 return;
1730 }
1731
1732 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1733 target_id, lun_id) != CAM_REQ_CMP) {
1734 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1735 return;
1736 }
1737
1738 xpt_async(ac_code, path, NULL);
1739 xpt_free_path(path);
1740 }
1741
1742 static void
1743 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1744 lun_id_t lun_id)
1745 {
1746 union ccb *ccb;
1747 cam_status status;
1748
1749 ccb = xpt_alloc_ccb_nowait();
1750 if (ccb == NULL) {
1751 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1752 return;
1753 }
1754
1755 status = xpt_create_path(&ccb->ccb_h.path, NULL,
1756 cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1757 if (status != CAM_REQ_CMP) {
1758 xpt_free_ccb(ccb);
1759 return;
1760 }
1761
1762 xpt_rescan(ccb);
1763 }
1764
/*
 * Rescan the entire bus (wildcard target and LUN).
 */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

        vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1771
1772 static void
1773 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1774 struct virtio_scsi_event *event)
1775 {
1776 target_id_t target_id;
1777 lun_id_t lun_id;
1778
1779 vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1780
1781 switch (event->reason) {
1782 case VIRTIO_SCSI_EVT_RESET_RESCAN:
1783 case VIRTIO_SCSI_EVT_RESET_REMOVED:
1784 vtscsi_execute_rescan(sc, target_id, lun_id);
1785 break;
1786 default:
1787 device_printf(sc->vtscsi_dev,
1788 "unhandled transport event reason: %d\n", event->reason);
1789 break;
1790 }
1791 }
1792
/*
 * Dispatch one event buffer dequeued from the event virtqueue and then
 * requeue the buffer so the host can reuse it.  A buffer flagged with
 * VIRTIO_SCSI_T_EVENTS_MISSED means events were dropped, so the whole
 * bus is rescanned instead.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
        int error __diagused;

        if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
                switch (event->event) {
                case VIRTIO_SCSI_T_TRANSPORT_RESET:
                        vtscsi_transport_reset_event(sc, event);
                        break;
                default:
                        device_printf(sc->vtscsi_dev,
                            "unhandled event: %d\n", event->event);
                        break;
                }
        } else
                vtscsi_execute_rescan_bus(sc);

        /*
         * This should always be successful since the buffer
         * was just dequeued.
         */
        error = vtscsi_enqueue_event_buf(sc, event);
        KASSERT(error == 0,
            ("cannot requeue event buffer: %d", error));
}
1819
1820 static int
1821 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1822 struct virtio_scsi_event *event)
1823 {
1824 struct sglist *sg;
1825 struct virtqueue *vq;
1826 int size, error;
1827
1828 sg = sc->vtscsi_sglist;
1829 vq = sc->vtscsi_event_vq;
1830 size = sc->vtscsi_event_buf_size;
1831
1832 bzero(event, size);
1833
1834 sglist_reset(sg);
1835 error = sglist_append(sg, event, size);
1836 if (error)
1837 return (error);
1838
1839 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1840 if (error)
1841 return (error);
1842
1843 virtqueue_notify(vq);
1844
1845 return (0);
1846 }
1847
/*
 * Post the initial set of event buffers on the event virtqueue.
 * Succeeds as long as at least one buffer could be posted; returns the
 * last enqueue error only when none could be.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
        struct virtio_scsi_event *event;
        int i, size, error;

        /*
         * The first release of QEMU with VirtIO SCSI support would crash
         * when attempting to notify the event virtqueue. This was fixed
         * when hotplug support was added.
         */
        if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
                size = sc->vtscsi_event_buf_size;
        else
                size = 0;

        /* Nothing to do when events are unusable or buffers too small. */
        if (size < sizeof(struct virtio_scsi_event))
                return (0);

        for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
                event = &sc->vtscsi_event_bufs[i];

                error = vtscsi_enqueue_event_buf(sc, event);
                if (error)
                        break;
        }

        /*
         * Even just one buffer is enough. Missed events are
         * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
         */
        if (i > 0)
                error = 0;

        return (error);
}
1884
/*
 * Repost the event buffers after a device reinit (e.g. bus reset).
 * Mirrors vtscsi_init_event_vq() but panics (under INVARIANTS) if not
 * even one buffer could be posted, since the queue was usable before.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
        struct virtio_scsi_event *event;
        int i, error;

        if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
            sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
                return;

        for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
                event = &sc->vtscsi_event_bufs[i];

                error = vtscsi_enqueue_event_buf(sc, event);
                if (error)
                        break;
        }

        /* One posted buffer is sufficient; see vtscsi_init_event_vq(). */
        KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1905
1906 static void
1907 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1908 {
1909 struct virtqueue *vq;
1910 int last;
1911
1912 vq = sc->vtscsi_event_vq;
1913 last = 0;
1914
1915 while (virtqueue_drain(vq, &last) != NULL)
1916 ;
1917
1918 KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1919 }
1920
1921 static void
1922 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1923 {
1924
1925 VTSCSI_LOCK_OWNED(sc);
1926
1927 if (sc->vtscsi_request_vq != NULL)
1928 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1929 if (sc->vtscsi_control_vq != NULL)
1930 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1931 }
1932
/*
 * Lock-acquiring wrapper around vtscsi_complete_vqs_locked().
 */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

        VTSCSI_LOCK(sc);
        vtscsi_complete_vqs_locked(sc);
        VTSCSI_UNLOCK(sc);
}
1941
/*
 * Cancel a request pulled off a virtqueue during a drain: stop or drain
 * its timeout callout, fail its CCB (CAM_NO_HBA on detach, otherwise
 * CAM_REQUEUE_REQ so CAM retries after the bus reset), and recycle it.
 * The lock discipline differs between the detach and reset paths; see
 * the comment below.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
        union ccb *ccb;
        int detach;

        ccb = req->vsr_ccb;

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

        /*
         * The callout must be drained when detaching since the request is
         * about to be freed. The VTSCSI_MTX must not be held for this in
         * case the callout is pending because there is a deadlock potential.
         * Otherwise, the virtqueue is being drained because of a bus reset
         * so we only need to attempt to stop the callouts.
         */
        detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
        if (detach != 0)
                VTSCSI_LOCK_NOTOWNED(sc);
        else
                VTSCSI_LOCK_OWNED(sc);

        if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
                if (detach != 0)
                        callout_drain(&req->vsr_callout);
                else
                        callout_stop(&req->vsr_callout);
        }

        if (ccb != NULL) {
                /* xpt_done() is called with the mutex held in both paths. */
                if (detach != 0) {
                        VTSCSI_LOCK(sc);
                        ccb->ccb_h.status = CAM_NO_HBA;
                } else
                        ccb->ccb_h.status = CAM_REQUEUE_REQ;
                xpt_done(ccb);
                if (detach != 0)
                        VTSCSI_UNLOCK(sc);
        }

        vtscsi_enqueue_request(sc, req);
}
1985
1986 static void
1987 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1988 {
1989 struct vtscsi_request *req;
1990 int last;
1991
1992 last = 0;
1993
1994 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1995
1996 while ((req = virtqueue_drain(vq, &last)) != NULL)
1997 vtscsi_cancel_request(sc, req);
1998
1999 KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
2000 }
2001
/*
 * Drain all three virtqueues: control and request via request
 * cancellation, events via simple discard.
 */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

        if (sc->vtscsi_control_vq != NULL)
                vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
        if (sc->vtscsi_request_vq != NULL)
                vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
        if (sc->vtscsi_event_vq != NULL)
                vtscsi_drain_event_vq(sc);
}
2013
/*
 * Quiesce the device: mask virtqueue interrupts, then stop the virtio
 * device itself.
 */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

        vtscsi_disable_vqs_intr(sc);
        virtio_stop(sc->vtscsi_dev);
}
2021
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding work, thaw the SIMQ, and reinitialize the device.  Called
 * with the softc mutex held.  VTSCSI_FLAG_RESET is set for the duration
 * so other paths (e.g. the abort completion) do not recurse into here.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
        int error;

        VTSCSI_LOCK_OWNED(sc);

        /* Administrative escape hatch via the tunable/sysctl knob. */
        if (vtscsi_bus_reset_disable != 0) {
                device_printf(sc->vtscsi_dev, "bus reset disabled\n");
                return (0);
        }

        sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

        /*
         * vtscsi_stop() will cause the in-flight requests to be canceled.
         * Those requests are then completed here so CAM will retry them
         * after the reset is complete.
         */
        vtscsi_stop(sc);
        vtscsi_complete_vqs_locked(sc);

        /* Rid the virtqueues of any remaining requests. */
        vtscsi_drain_vqs(sc);

        /*
         * Any resource shortage that froze the SIMQ cannot persist across
         * a bus reset so ensure it gets thawed here.
         */
        if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
                xpt_release_simq(sc->vtscsi_sim, 0);

        error = vtscsi_reinit(sc);
        if (error) {
                device_printf(sc->vtscsi_dev,
                    "reinitialization failed, stopping device...\n");
                vtscsi_stop(sc);
        } else
                vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
                    CAM_LUN_WILDCARD);

        sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

        return (error);
}
2067
/*
 * One-time initialization of a freshly allocated request: back-pointer
 * to the softc and a mutex-protected callout for command timeouts.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
        int req_nsegs, resp_nsegs;

        /*
         * The request and response structures are handed to the device as
         * single scatter/gather segments, so they must not straddle a page.
         */
        req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
        resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

        KASSERT(req_nsegs == 1, ("request crossed page boundary"));
        KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

        req->vsr_softc = sc;
        callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2085
/*
 * Allocate the pool of request structures and place them on the free
 * list.  On ENOMEM the requests allocated so far remain on the free
 * list; presumably the caller unwinds via vtscsi_free_requests() —
 * TODO confirm against the attach error path.
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
        struct vtscsi_request *req;
        int i, nreqs;

        /*
         * Commands destined for either the request or control queues come
         * from the same SIM queue. Use the size of the request virtqueue
         * as it (should) be much more frequently used. Some additional
         * requests are allocated for internal (TMF) use.
         */
        nreqs = virtqueue_size(sc->vtscsi_request_vq);
        if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
                nreqs /= VTSCSI_MIN_SEGMENTS;
        nreqs += VTSCSI_RESERVED_REQUESTS;

        for (i = 0; i < nreqs; i++) {
                req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
                    M_NOWAIT);
                if (req == NULL)
                        return (ENOMEM);

                vtscsi_init_request(sc, req);

                sc->vtscsi_nrequests++;
                vtscsi_enqueue_request(sc, req);
        }

        return (0);
}
2117
2118 static void
2119 vtscsi_free_requests(struct vtscsi_softc *sc)
2120 {
2121 struct vtscsi_request *req;
2122
2123 while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2124 KASSERT(callout_active(&req->vsr_callout) == 0,
2125 ("request callout still active"));
2126
2127 sc->vtscsi_nrequests--;
2128 free(req, M_DEVBUF);
2129 }
2130
2131 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2132 sc->vtscsi_nrequests));
2133 }
2134
/*
 * Return a request to the free list, scrubbing its per-command state.
 * Also thaws the SIMQ if it was frozen for lack of free requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

        KASSERT(req->vsr_softc == sc,
            ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

        vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

        /* A request is available so the SIMQ could be released. */
        if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
                xpt_release_simq(sc->vtscsi_sim, 1);

        req->vsr_ccb = NULL;
        req->vsr_complete = NULL;
        req->vsr_ptr0 = NULL;
        req->vsr_state = VTSCSI_REQ_STATE_FREE;
        req->vsr_flags = 0;

        bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
        bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

        /*
         * We insert at the tail of the queue in order to make it
         * very unlikely a request will be reused if we race with
         * stopping its callout handler.
         */
        TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2164
2165 static struct vtscsi_request *
2166 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2167 {
2168 struct vtscsi_request *req;
2169
2170 req = TAILQ_FIRST(&sc->vtscsi_req_free);
2171 if (req != NULL) {
2172 req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2173 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2174 } else
2175 sc->vtscsi_stats.dequeue_no_requests++;
2176
2177 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2178
2179 return (req);
2180 }
2181
2182 static void
2183 vtscsi_complete_request(struct vtscsi_request *req)
2184 {
2185
2186 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2187 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2188
2189 if (req->vsr_complete != NULL)
2190 req->vsr_complete(req->vsr_softc, req);
2191 }
2192
2193 static void
2194 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2195 {
2196 struct vtscsi_request *req;
2197
2198 VTSCSI_LOCK_OWNED(sc);
2199
2200 while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2201 vtscsi_complete_request(req);
2202 }
2203
/*
 * Interrupt handler for the control virtqueue.  The enable/retry loop
 * closes the race where a completion lands between draining the queue
 * and re-enabling its interrupt.
 */
static void
vtscsi_control_vq_intr(void *xsc)
{
        struct vtscsi_softc *sc;
        struct virtqueue *vq;

        sc = xsc;
        vq = sc->vtscsi_control_vq;

again:
        VTSCSI_LOCK(sc);

        vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

        if (virtqueue_enable_intr(vq) != 0) {
                /* More work arrived; disable and drain again. */
                virtqueue_disable_intr(vq);
                VTSCSI_UNLOCK(sc);
                goto again;
        }

        VTSCSI_UNLOCK(sc);
}
2226
/*
 * Interrupt handler for the event virtqueue.  Each dequeued buffer is
 * handled (and requeued) by vtscsi_handle_event().  Same enable/retry
 * race-avoidance loop as the other queue handlers.
 */
static void
vtscsi_event_vq_intr(void *xsc)
{
        struct vtscsi_softc *sc;
        struct virtqueue *vq;
        struct virtio_scsi_event *event;

        sc = xsc;
        vq = sc->vtscsi_event_vq;

again:
        VTSCSI_LOCK(sc);

        while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
                vtscsi_handle_event(sc, event);

        if (virtqueue_enable_intr(vq) != 0) {
                /* More events arrived; disable and drain again. */
                virtqueue_disable_intr(vq);
                VTSCSI_UNLOCK(sc);
                goto again;
        }

        VTSCSI_UNLOCK(sc);
}
2251
/*
 * Interrupt handler for the request virtqueue.  Same enable/retry
 * race-avoidance loop as the other queue handlers.
 */
static void
vtscsi_request_vq_intr(void *xsc)
{
        struct vtscsi_softc *sc;
        struct virtqueue *vq;

        sc = xsc;
        vq = sc->vtscsi_request_vq;

again:
        VTSCSI_LOCK(sc);

        vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

        if (virtqueue_enable_intr(vq) != 0) {
                /* More completions arrived; disable and drain again. */
                virtqueue_disable_intr(vq);
                VTSCSI_UNLOCK(sc);
                goto again;
        }

        VTSCSI_UNLOCK(sc);
}
2274
/*
 * Mask interrupts on all three virtqueues.
 */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

        virtqueue_disable_intr(sc->vtscsi_control_vq);
        virtqueue_disable_intr(sc->vtscsi_event_vq);
        virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2283
/*
 * Unmask interrupts on all three virtqueues.
 */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

        virtqueue_enable_intr(sc->vtscsi_control_vq);
        virtqueue_enable_intr(sc->vtscsi_event_vq);
        virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2292
2293 static void
2294 vtscsi_get_tunables(struct vtscsi_softc *sc)
2295 {
2296 char tmpstr[64];
2297
2298 TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2299
2300 snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2301 device_get_unit(sc->vtscsi_dev));
2302 TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2303 }
2304
/*
 * Attach the per-device sysctl nodes: the writable debug level and the
 * read-only driver statistics.
 */
static void
vtscsi_setup_sysctl(struct vtscsi_softc *sc)
{
        device_t dev;
        struct vtscsi_statistics *stats;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct sysctl_oid_list *child;

        dev = sc->vtscsi_dev;
        stats = &sc->vtscsi_stats;
        ctx = device_get_sysctl_ctx(dev);
        tree = device_get_sysctl_tree(dev);
        child = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
            CTLFLAG_RW, &sc->vtscsi_debug, 0,
            "Debug level");

        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
            CTLFLAG_RD, &stats->scsi_cmd_timeouts,
            "SCSI command timeouts");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
            CTLFLAG_RD, &stats->dequeue_no_requests,
            "No available requests to dequeue");
}
2331
/*
 * Debug printf for a request, prefixed with its CAM path (or a
 * "noperiph" SIM identifier when no CCB is attached) and, for SCSI I/O
 * CCBs, a decoded rendition of the command.  Formats into a fixed-size
 * on-stack sbuf; output longer than the buffer is truncated.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
        struct vtscsi_softc *sc;
        union ccb *ccb;
        struct sbuf sb;
        va_list ap;
        char str[192];
        char path_str[64];

        if (req == NULL)
                return;

        sc = req->vsr_softc;
        ccb = req->vsr_ccb;

        va_start(ap, fmt);
        sbuf_new(&sb, str, sizeof(str), 0);

        if (ccb == NULL) {
                sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
                    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
                    cam_sim_bus(sc->vtscsi_sim));
        } else {
                xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
                sbuf_cat(&sb, path_str);
                if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
                        scsi_command_string(&ccb->csio, &sb);
                        sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
                }
        }

        sbuf_vprintf(&sb, fmt, ap);
        va_end(ap);

        sbuf_finish(&sb);
        printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
            sbuf_data(&sb));
}