1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright 2013 Nathan Whitehorn
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/selinfo.h>
38 #include <sys/bus.h>
39 #include <sys/conf.h>
40 #include <sys/eventhandler.h>
41 #include <sys/rman.h>
42 #include <sys/bus_dma.h>
43 #include <sys/bio.h>
44 #include <sys/ioccom.h>
45 #include <sys/uio.h>
46 #include <sys/proc.h>
47 #include <sys/signalvar.h>
48 #include <sys/sysctl.h>
49 #include <sys/endian.h>
50 #include <sys/vmem.h>
51
52 #include <cam/cam.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_debug.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt_periph.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/scsi/scsi_all.h>
60 #include <cam/scsi/scsi_message.h>
61
62 #include <dev/ofw/openfirm.h>
63 #include <dev/ofw/ofw_bus.h>
64 #include <dev/ofw/ofw_bus_subr.h>
65
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68
69 #include <powerpc/pseries/phyp-hvcall.h>
70
71 struct vscsi_softc;
72
73 /* VSCSI CRQ format from table 260 of PAPR spec 2.4 (page 760) */
74 struct vscsi_crq {
75 uint8_t valid;
76 uint8_t format;
77 uint8_t reserved;
78 uint8_t status;
79 uint16_t timeout;
80 uint16_t iu_length;
81 uint64_t iu_data;
82 };
83
84 struct vscsi_xfer {
85 TAILQ_ENTRY(vscsi_xfer) queue;
86 struct vscsi_softc *sc;
87 union ccb *ccb;
88 bus_dmamap_t dmamap;
89 uint64_t tag;
90
91 vmem_addr_t srp_iu_offset;
92 vmem_size_t srp_iu_size;
93 };
94
95 TAILQ_HEAD(vscsi_xferq, vscsi_xfer);
96
97 struct vscsi_softc {
98 device_t dev;
99 struct cam_devq *devq;
100 struct cam_sim *sim;
101 struct cam_path *path;
102 struct mtx io_lock;
103
104 cell_t unit;
105 int bus_initialized;
106 int bus_logged_in;
107 int max_transactions;
108
109 int irqid;
110 struct resource *irq;
111 void *irq_cookie;
112
113 bus_dma_tag_t crq_tag;
114 struct vscsi_crq *crq_queue;
115 int n_crqs, cur_crq;
116 bus_dmamap_t crq_map;
117 bus_addr_t crq_phys;
118
119 vmem_t *srp_iu_arena;
120 void *srp_iu_queue;
121 bus_addr_t srp_iu_phys;
122
123 bus_dma_tag_t data_tag;
124
125 struct vscsi_xfer loginxp;
126 struct vscsi_xfer *xfer;
127 struct vscsi_xferq active_xferq;
128 struct vscsi_xferq free_xferq;
129 };
130
131 struct srp_login {
132 uint8_t type;
133 uint8_t reserved[7];
134 uint64_t tag;
135 uint64_t max_cmd_length;
136 uint32_t reserved2;
137 uint16_t buffer_formats;
138 uint8_t flags;
139 uint8_t reserved3[5];
140 uint8_t initiator_port_id[16];
141 uint8_t target_port_id[16];
142 } __packed;
143
144 struct srp_login_rsp {
145 uint8_t type;
146 uint8_t reserved[3];
147 uint32_t request_limit_delta;
	uint64_t tag;
149 uint32_t max_i_to_t_len;
150 uint32_t max_t_to_i_len;
151 uint16_t buffer_formats;
152 uint8_t flags;
153 /* Some reserved bits follow */
154 } __packed;
155
156 struct srp_cmd {
157 uint8_t type;
158 uint8_t flags1;
159 uint8_t reserved[3];
160 uint8_t formats;
161 uint8_t out_buffer_count;
162 uint8_t in_buffer_count;
163 uint64_t tag;
164 uint32_t reserved2;
165 uint64_t lun;
166 uint8_t reserved3[3];
167 uint8_t additional_cdb;
168 uint8_t cdb[16];
169 uint8_t data_payload[0];
170 } __packed;
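
/*
 * The variable-length payload of an SRP_CMD IU carries any CDB bytes
 * past the first 16, followed by the data buffer descriptors; see
 * vscsi_scsi_command() for how it is laid out.
 */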
171
172 struct srp_rsp {
173 uint8_t type;
174 uint8_t reserved[3];
175 uint32_t request_limit_delta;
176 uint64_t tag;
177 uint16_t reserved2;
178 uint8_t flags;
179 uint8_t status;
180 uint32_t data_out_resid;
181 uint32_t data_in_resid;
182 uint32_t sense_data_len;
183 uint32_t response_data_len;
184 uint8_t data_payload[0];
185 } __packed;
186
187 struct srp_tsk_mgmt {
188 uint8_t type;
189 uint8_t reserved[7];
190 uint64_t tag;
191 uint32_t reserved2;
192 uint64_t lun;
193 uint8_t reserved3[2];
194 uint8_t function;
195 uint8_t reserved4;
196 uint64_t manage_tag;
197 uint64_t reserved5;
198 } __packed;
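
/*
 * Of the SRP task management functions, only logical unit reset
 * (function code 0x08) is issued by this driver; see
 * vscsi_task_management().
 */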
199
200 /* Message code type */
201 #define SRP_LOGIN_REQ 0x00
202 #define SRP_TSK_MGMT 0x01
203 #define SRP_CMD 0x02
204 #define SRP_I_LOGOUT 0x03
205
206 #define SRP_LOGIN_RSP 0xC0
207 #define SRP_RSP 0xC1
208 #define SRP_LOGIN_REJ 0xC2
209
210 #define SRP_T_LOGOUT 0x80
211 #define SRP_CRED_REQ 0x81
212 #define SRP_AER_REQ 0x82
213
214 #define SRP_CRED_RSP 0x41
#define SRP_AER_RSP 0x42
216
217 /* Flags for srp_rsp flags field */
218 #define SRP_RSPVALID 0x01
219 #define SRP_SNSVALID 0x02
220 #define SRP_DOOVER 0x04
221 #define SRP_DOUNDER 0x08
222 #define SRP_DIOVER 0x10
223 #define SRP_DIUNDER 0x20
224
#define MAD_SUCCESS 0x00
226 #define MAD_NOT_SUPPORTED 0xf1
227 #define MAD_FAILED 0xf7
228
229 #define MAD_EMPTY_IU 0x01
230 #define MAD_ERROR_LOGGING_REQUEST 0x02
231 #define MAD_ADAPTER_INFO_REQUEST 0x03
232 #define MAD_CAPABILITIES_EXCHANGE 0x05
233 #define MAD_PHYS_ADAP_INFO_REQUEST 0x06
234 #define MAD_TAPE_PASSTHROUGH_REQUEST 0x07
235 #define MAD_ENABLE_FAST_FAIL 0x08
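/* Of these, only MAD_ADAPTER_INFO_REQUEST is sent by this driver */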
236
237 static int vscsi_probe(device_t);
238 static int vscsi_attach(device_t);
239 static int vscsi_detach(device_t);
240 static void vscsi_cam_action(struct cam_sim *, union ccb *);
241 static void vscsi_cam_poll(struct cam_sim *);
242 static void vscsi_intr(void *arg);
243 static void vscsi_check_response_queue(struct vscsi_softc *sc);
244 static void vscsi_setup_bus(struct vscsi_softc *sc);
245
246 static void vscsi_srp_login(struct vscsi_softc *sc);
247 static void vscsi_crq_load_cb(void *, bus_dma_segment_t *, int, int);
248 static void vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs,
249 int nsegs, int err);
250 static void vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb);
251 static void vscsi_srp_response(struct vscsi_xfer *, struct vscsi_crq *);
252
253 static device_method_t vscsi_methods[] = {
254 DEVMETHOD(device_probe, vscsi_probe),
255 DEVMETHOD(device_attach, vscsi_attach),
256 DEVMETHOD(device_detach, vscsi_detach),
257
258 DEVMETHOD_END
259 };
260
261 static driver_t vscsi_driver = {
262 "vscsi",
263 vscsi_methods,
264 sizeof(struct vscsi_softc)
265 };
266
267 DRIVER_MODULE(vscsi, vdevice, vscsi_driver, 0, 0);
MALLOC_DEFINE(M_VSCSI, "vscsi", "VSCSI transfer structures");
269
270 static int
271 vscsi_probe(device_t dev)
272 {
273
274 if (!ofw_bus_is_compatible(dev, "IBM,v-scsi"))
275 return (ENXIO);
276
277 device_set_desc(dev, "POWER Hypervisor Virtual SCSI Bus");
278 return (0);
279 }
280
281 static int
282 vscsi_attach(device_t dev)
283 {
284 struct vscsi_softc *sc;
285 struct vscsi_xfer *xp;
286 int error, i;
287
288 sc = device_get_softc(dev);
289 if (sc == NULL)
290 return (EINVAL);
291
292 sc->dev = dev;
293 mtx_init(&sc->io_lock, "vscsi", NULL, MTX_DEF);
294
295 /* Get properties */
296 OF_getencprop(ofw_bus_get_node(dev), "reg", &sc->unit,
297 sizeof(sc->unit));
298
299 /* Setup interrupt */
300 sc->irqid = 0;
301 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
302 RF_ACTIVE);
303
304 if (!sc->irq) {
305 device_printf(dev, "Could not allocate IRQ\n");
306 mtx_destroy(&sc->io_lock);
307 return (ENXIO);
308 }
309
310 bus_setup_intr(dev, sc->irq, INTR_TYPE_CAM | INTR_MPSAFE |
311 INTR_ENTROPY, NULL, vscsi_intr, sc, &sc->irq_cookie);
312
313 /* Data DMA */
314 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
315 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
316 256, BUS_SPACE_MAXSIZE_32BIT, 0, busdma_lock_mutex, &sc->io_lock,
317 &sc->data_tag);
318
319 TAILQ_INIT(&sc->active_xferq);
320 TAILQ_INIT(&sc->free_xferq);
321
322 /* First XFER for login data */
323 sc->loginxp.sc = sc;
324 bus_dmamap_create(sc->data_tag, 0, &sc->loginxp.dmamap);
325 TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);
326
327 /* CRQ area */
328 error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
329 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 8*PAGE_SIZE,
330 1, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->crq_tag);
331 error = bus_dmamem_alloc(sc->crq_tag, (void **)&sc->crq_queue,
332 BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->crq_map);
333 sc->crq_phys = 0;
334 sc->n_crqs = 0;
335 error = bus_dmamap_load(sc->crq_tag, sc->crq_map, sc->crq_queue,
336 8*PAGE_SIZE, vscsi_crq_load_cb, sc, 0);
337
338 mtx_lock(&sc->io_lock);
339 vscsi_setup_bus(sc);
340 sc->xfer = malloc(sizeof(sc->xfer[0])*sc->max_transactions, M_VSCSI,
341 M_NOWAIT);
342 for (i = 0; i < sc->max_transactions; i++) {
343 xp = &sc->xfer[i];
344 xp->sc = sc;
345
346 error = bus_dmamap_create(sc->data_tag, 0, &xp->dmamap);
347 if (error) {
348 device_printf(dev, "Could not create DMA map (%d)\n",
349 error);
350 break;
351 }
352
353 TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
354 }
355 mtx_unlock(&sc->io_lock);
356
357 /* Allocate CAM bits */
358 if ((sc->devq = cam_simq_alloc(sc->max_transactions)) == NULL)
359 return (ENOMEM);
360
361 sc->sim = cam_sim_alloc(vscsi_cam_action, vscsi_cam_poll, "vscsi", sc,
362 device_get_unit(dev), &sc->io_lock,
363 sc->max_transactions, sc->max_transactions,
364 sc->devq);
365 if (sc->sim == NULL) {
366 cam_simq_free(sc->devq);
367 sc->devq = NULL;
368 device_printf(dev, "CAM SIM attach failed\n");
369 return (EINVAL);
370 }
371
372 mtx_lock(&sc->io_lock);
373 if (xpt_bus_register(sc->sim, dev, 0) != 0) {
374 device_printf(dev, "XPT bus registration failed\n");
375 cam_sim_free(sc->sim, FALSE);
376 sc->sim = NULL;
377 cam_simq_free(sc->devq);
378 sc->devq = NULL;
379 mtx_unlock(&sc->io_lock);
380 return (EINVAL);
381 }
382 mtx_unlock(&sc->io_lock);
383
384 return (0);
385 }
386
387 static int
388 vscsi_detach(device_t dev)
389 {
390 struct vscsi_softc *sc;
391
392 sc = device_get_softc(dev);
393 if (sc == NULL)
394 return (EINVAL);
395
396 if (sc->sim != NULL) {
397 mtx_lock(&sc->io_lock);
398 xpt_bus_deregister(cam_sim_path(sc->sim));
399 cam_sim_free(sc->sim, FALSE);
400 sc->sim = NULL;
401 mtx_unlock(&sc->io_lock);
402 }
403
404 if (sc->devq != NULL) {
405 cam_simq_free(sc->devq);
406 sc->devq = NULL;
407 }
408
409 mtx_destroy(&sc->io_lock);
410
411 return (0);
412 }
413
414 static void
415 vscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
416 {
417 struct vscsi_softc *sc = cam_sim_softc(sim);
418
419 mtx_assert(&sc->io_lock, MA_OWNED);
420
421 switch (ccb->ccb_h.func_code) {
422 case XPT_PATH_INQ:
423 {
424 struct ccb_pathinq *cpi = &ccb->cpi;
425
426 cpi->version_num = 1;
427 cpi->hba_inquiry = PI_TAG_ABLE;
428 cpi->hba_misc = PIM_EXTLUNS;
429 cpi->target_sprt = 0;
430 cpi->hba_eng_cnt = 0;
431 cpi->max_target = 0;
432 cpi->max_lun = 0;
433 cpi->initiator_id = ~0;
434 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
435 strlcpy(cpi->hba_vid, "IBM", HBA_IDLEN);
436 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
437 cpi->unit_number = cam_sim_unit(sim);
438 cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000; /* KB/s */
440 cpi->transport = XPORT_SRP;
441 cpi->transport_version = 0;
442 cpi->protocol = PROTO_SCSI;
443 cpi->protocol_version = SCSI_REV_SPC4;
444 cpi->ccb_h.status = CAM_REQ_CMP;
445 break;
446 }
447 case XPT_RESET_BUS:
448 ccb->ccb_h.status = CAM_REQ_CMP;
449 break;
450 case XPT_RESET_DEV:
451 ccb->ccb_h.status = CAM_REQ_INPROG;
452 vscsi_task_management(sc, ccb);
453 return;
454 case XPT_GET_TRAN_SETTINGS:
455 ccb->cts.protocol = PROTO_SCSI;
456 ccb->cts.protocol_version = SCSI_REV_SPC4;
457 ccb->cts.transport = XPORT_SRP;
458 ccb->cts.transport_version = 0;
459 ccb->cts.proto_specific.valid = 0;
460 ccb->cts.xport_specific.valid = 0;
461 ccb->ccb_h.status = CAM_REQ_CMP;
462 break;
463 case XPT_SET_TRAN_SETTINGS:
464 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
465 break;
466 case XPT_SCSI_IO:
467 {
468 struct vscsi_xfer *xp;
469
470 ccb->ccb_h.status = CAM_REQ_INPROG;
471
472 xp = TAILQ_FIRST(&sc->free_xferq);
473 if (xp == NULL)
474 panic("SCSI queue flooded");
475 xp->ccb = ccb;
476 TAILQ_REMOVE(&sc->free_xferq, xp, queue);
477 TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
478 bus_dmamap_load_ccb(sc->data_tag, xp->dmamap,
479 ccb, vscsi_scsi_command, xp, 0);
480
481 return;
482 }
483 default:
484 ccb->ccb_h.status = CAM_REQ_INVALID;
485 break;
486 }
487
488 xpt_done(ccb);
489 return;
490 }
491
492 static void
493 vscsi_srp_login(struct vscsi_softc *sc)
494 {
495 struct vscsi_xfer *xp;
496 struct srp_login *login;
497 struct vscsi_crq crq;
498 int err;
499
500 mtx_assert(&sc->io_lock, MA_OWNED);
501
502 xp = TAILQ_FIRST(&sc->free_xferq);
503 if (xp == NULL)
504 panic("SCSI queue flooded");
505 xp->ccb = NULL;
506 TAILQ_REMOVE(&sc->free_xferq, xp, queue);
507 TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
508
	/* Set up command: the SRP LOGIN_REQ IU is a fixed 64 bytes */
	xp->srp_iu_size = 64;
511 crq.iu_length = htobe16(xp->srp_iu_size);
512 err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
513 M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
514 if (err)
515 panic("Error during VMEM allocation (%d)", err);
516
517 login = (struct srp_login *)((uint8_t *)xp->sc->srp_iu_queue +
518 (uintptr_t)xp->srp_iu_offset);
519 bzero(login, xp->srp_iu_size);
520 login->type = SRP_LOGIN_REQ;
521 login->tag = (uint64_t)(xp);
522 login->max_cmd_length = htobe64(256);
523 login->buffer_formats = htobe16(0x1 | 0x2); /* Direct and indirect */
524 login->flags = 0;
525
526 /* Create CRQ entry */
527 crq.valid = 0x80;
528 crq.format = 0x01;
529 crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
530 bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
531
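	/*
	 * H_SEND_CRQ takes the 16-byte element as two doublewords whose
	 * most significant byte is the first byte of the structure;
	 * be64toh() of the raw in-memory words produces that on either
	 * endianness.
	 */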
532 err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
533 be64toh(((uint64_t *)(&crq))[0]),
534 be64toh(((uint64_t *)(&crq))[1]));
535 if (err != 0)
536 panic("CRQ send failure (%d)", err);
537 }
538
539 static void
540 vscsi_task_management(struct vscsi_softc *sc, union ccb *ccb)
541 {
542 struct srp_tsk_mgmt *cmd;
543 struct vscsi_xfer *xp;
544 struct vscsi_crq crq;
545 int err;
546
547 mtx_assert(&sc->io_lock, MA_OWNED);
548
549 xp = TAILQ_FIRST(&sc->free_xferq);
550 if (xp == NULL)
551 panic("SCSI queue flooded");
552 xp->ccb = ccb;
553 TAILQ_REMOVE(&sc->free_xferq, xp, queue);
554 TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
555
556 xp->srp_iu_size = sizeof(*cmd);
557 crq.iu_length = htobe16(xp->srp_iu_size);
558 err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
559 M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
560 if (err)
561 panic("Error during VMEM allocation (%d)", err);
562
563 cmd = (struct srp_tsk_mgmt *)((uint8_t *)xp->sc->srp_iu_queue +
564 (uintptr_t)xp->srp_iu_offset);
565 bzero(cmd, xp->srp_iu_size);
566 cmd->type = SRP_TSK_MGMT;
567 cmd->tag = (uint64_t)xp;
568 cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
569
570 switch (ccb->ccb_h.func_code) {
571 case XPT_RESET_DEV:
		cmd->function = 0x08;	/* Logical unit reset */
573 break;
574 default:
575 panic("Unimplemented code %d", ccb->ccb_h.func_code);
576 break;
577 }
578
579 bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);
580
581 /* Create CRQ entry */
582 crq.valid = 0x80;
583 crq.format = 0x01;
584 crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
585
586 err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
587 be64toh(((uint64_t *)(&crq))[0]),
588 be64toh(((uint64_t *)(&crq))[1]));
589 if (err != 0)
590 panic("CRQ send failure (%d)", err);
591 }
592
593 static void
594 vscsi_scsi_command(void *xxp, bus_dma_segment_t *segs, int nsegs, int err)
595 {
596 struct vscsi_xfer *xp = xxp;
597 uint8_t *cdb;
598 union ccb *ccb = xp->ccb;
599 struct srp_cmd *cmd;
600 uint64_t chunk_addr;
601 uint32_t chunk_size;
602 int desc_start, i;
603 struct vscsi_crq crq;
604
605 KASSERT(err == 0, ("DMA error %d\n", err));
606
607 mtx_assert(&xp->sc->io_lock, MA_OWNED);
608
609 cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
610 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;
611
612 /* Command format from Table 20, page 37 of SRP spec */
613 xp->srp_iu_size = 48 + ((nsegs > 1) ? 20 : 16) +
614 ((ccb->csio.cdb_len > 16) ? (ccb->csio.cdb_len - 16) : 0);
615 crq.iu_length = htobe16(xp->srp_iu_size);
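	/*
	 * The indirect descriptor list is not counted in the transmitted
	 * IU length: it sits past the end of the IU in the same
	 * allocation and is referenced by its bus address below.
	 */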
616 if (nsegs > 1)
617 xp->srp_iu_size += nsegs*16;
618 xp->srp_iu_size = roundup(xp->srp_iu_size, 16);
619 err = vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
620 M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
621 if (err)
622 panic("Error during VMEM allocation (%d)", err);
623
624 cmd = (struct srp_cmd *)((uint8_t *)xp->sc->srp_iu_queue +
625 (uintptr_t)xp->srp_iu_offset);
626 bzero(cmd, xp->srp_iu_size);
627 cmd->type = SRP_CMD;
628 if (ccb->csio.cdb_len > 16)
629 cmd->additional_cdb = (ccb->csio.cdb_len - 16) << 2;
630 memcpy(cmd->cdb, cdb, ccb->csio.cdb_len);
631
632 cmd->tag = (uint64_t)(xp); /* Let the responder find this again */
633 cmd->lun = htobe64(CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
634
635 if (nsegs > 1) {
636 /* Use indirect descriptors */
637 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
638 case CAM_DIR_OUT:
639 cmd->formats = (2 << 4);
640 break;
641 case CAM_DIR_IN:
642 cmd->formats = 2;
643 break;
644 default:
645 panic("Does not support bidirectional commands (%d)",
646 ccb->ccb_h.flags & CAM_DIR_MASK);
647 break;
648 }
649
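		/*
		 * Indirect descriptor, following any extra CDB bytes:
		 *	8 byte address of the descriptor list
		 *	4 byte handle (zero)
		 *	4 byte length of the descriptor list
		 *	4 byte total data length
		 * The list itself (one 16-byte direct descriptor per
		 * S/G segment) follows at offset 20.
		 */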
650 desc_start = ((ccb->csio.cdb_len > 16) ?
651 ccb->csio.cdb_len - 16 : 0);
652 chunk_addr = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset + 20 +
653 desc_start + sizeof(*cmd));
654 chunk_size = htobe32(16*nsegs);
655 memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
656 memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
657 chunk_size = 0;
658 for (i = 0; i < nsegs; i++)
659 chunk_size += segs[i].ds_len;
660 chunk_size = htobe32(chunk_size);
661 memcpy(&cmd->data_payload[desc_start+16], &chunk_size, 4);
662 desc_start += 20;
663 for (i = 0; i < nsegs; i++) {
664 chunk_addr = htobe64(segs[i].ds_addr);
665 chunk_size = htobe32(segs[i].ds_len);
666
667 memcpy(&cmd->data_payload[desc_start + 16*i],
668 &chunk_addr, 8);
669 /* Set handle tag to 0 */
670 memcpy(&cmd->data_payload[desc_start + 16*i + 12],
671 &chunk_size, 4);
672 }
673 } else if (nsegs == 1) {
674 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
675 case CAM_DIR_OUT:
676 cmd->formats = (1 << 4);
677 break;
678 case CAM_DIR_IN:
679 cmd->formats = 1;
680 break;
681 default:
682 panic("Does not support bidirectional commands (%d)",
683 ccb->ccb_h.flags & CAM_DIR_MASK);
684 break;
685 }
686
687 /*
688 * Memory descriptor:
689 * 8 byte address
690 * 4 byte handle
691 * 4 byte length
692 */
693
694 chunk_addr = htobe64(segs[0].ds_addr);
695 chunk_size = htobe32(segs[0].ds_len);
696 desc_start = ((ccb->csio.cdb_len > 16) ?
697 ccb->csio.cdb_len - 16 : 0);
698
699 memcpy(&cmd->data_payload[desc_start], &chunk_addr, 8);
700 /* Set handle tag to 0 */
701 memcpy(&cmd->data_payload[desc_start+12], &chunk_size, 4);
702 KASSERT(xp->srp_iu_size >= 48 + ((ccb->csio.cdb_len > 16) ?
703 ccb->csio.cdb_len : 16), ("SRP IU command length"));
704 } else {
705 cmd->formats = 0;
706 }
707 bus_dmamap_sync(xp->sc->crq_tag, xp->sc->crq_map, BUS_DMASYNC_PREWRITE);
708
709 /* Create CRQ entry */
710 crq.valid = 0x80;
711 crq.format = 0x01;
712 crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
713
714 err = phyp_hcall(H_SEND_CRQ, xp->sc->unit,
715 be64toh(((uint64_t *)(&crq))[0]),
716 be64toh(((uint64_t *)(&crq))[1]));
717 if (err != 0)
718 panic("CRQ send failure (%d)", err);
719 }
720
721 static void
722 vscsi_crq_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int err)
723 {
724 struct vscsi_softc *sc = xsc;
725
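	/*
	 * Split the DMA region: the first page holds the CRQ ring and
	 * the remainder becomes a vmem arena carved into SRP IUs
	 * (offsets from PAGE_SIZE to the end of the mapping).
	 */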
726 sc->crq_phys = segs[0].ds_addr;
727 sc->n_crqs = PAGE_SIZE/sizeof(struct vscsi_crq);
728
729 sc->srp_iu_queue = (uint8_t *)(sc->crq_queue);
730 sc->srp_iu_phys = segs[0].ds_addr;
731 sc->srp_iu_arena = vmem_create("VSCSI SRP IU", PAGE_SIZE,
732 segs[0].ds_len - PAGE_SIZE, 16, 0, M_BESTFIT | M_NOWAIT);
733 }
734
735 static void
736 vscsi_setup_bus(struct vscsi_softc *sc)
737 {
738 struct vscsi_crq crq;
739 struct vscsi_xfer *xp;
740 int error;
741
742 struct {
743 uint32_t type;
744 uint16_t status;
745 uint16_t length;
746 uint64_t tag;
747 uint64_t buffer;
748 struct {
749 char srp_version[8];
750 char partition_name[96];
751 uint32_t partition_number;
752 uint32_t mad_version;
753 uint32_t os_type;
754 uint32_t port_max_txu[8];
755 } payload;
756 } mad_adapter_info;
757
758 bzero(&crq, sizeof(crq));
759
	/*
	 * Initialization message (valid 0xc0, format 0x01); the partner
	 * answers with an initialization-complete message (format 0x02)
	 */
761 crq.valid = 0xc0;
762 crq.format = 0x01;
763
764 do {
765 error = phyp_hcall(H_FREE_CRQ, sc->unit);
766 } while (error == H_BUSY);
767
768 /* See initialization sequence page 757 */
769 bzero(sc->crq_queue, sc->n_crqs*sizeof(sc->crq_queue[0]));
770 sc->cur_crq = 0;
771 sc->bus_initialized = 0;
772 sc->bus_logged_in = 0;
773 bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
774 error = phyp_hcall(H_REG_CRQ, sc->unit, sc->crq_phys,
775 sc->n_crqs*sizeof(sc->crq_queue[0]));
	KASSERT(error == 0, ("CRQ registration failed (%d)", error));
777
778 error = phyp_hcall(H_SEND_CRQ, sc->unit,
779 be64toh(((uint64_t *)(&crq))[0]),
780 be64toh(((uint64_t *)(&crq))[1]));
781 if (error != 0)
782 panic("CRQ setup failure (%d)", error);
783
784 while (sc->bus_initialized == 0)
785 vscsi_check_response_queue(sc);
786
787 /* Send MAD adapter info */
788 mad_adapter_info.type = htobe32(MAD_ADAPTER_INFO_REQUEST);
789 mad_adapter_info.status = 0;
790 mad_adapter_info.length = htobe16(sizeof(mad_adapter_info.payload));
791
792 strcpy(mad_adapter_info.payload.srp_version, "16.a");
793 strcpy(mad_adapter_info.payload.partition_name, "UNKNOWN");
794 mad_adapter_info.payload.partition_number = -1;
795 mad_adapter_info.payload.mad_version = htobe32(1);
796 mad_adapter_info.payload.os_type = htobe32(2); /* Claim we are Linux */
797 mad_adapter_info.payload.port_max_txu[0] = 0;
798 /* If this fails, we get the defaults above */
799 OF_getprop(OF_finddevice("/"), "ibm,partition-name",
800 mad_adapter_info.payload.partition_name,
801 sizeof(mad_adapter_info.payload.partition_name));
802 OF_getprop(OF_finddevice("/"), "ibm,partition-no",
803 &mad_adapter_info.payload.partition_number,
804 sizeof(mad_adapter_info.payload.partition_number));
805
806 xp = TAILQ_FIRST(&sc->free_xferq);
807 xp->ccb = NULL;
808 TAILQ_REMOVE(&sc->free_xferq, xp, queue);
809 TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
810 xp->srp_iu_size = sizeof(mad_adapter_info);
811 crq.iu_length = htobe16(xp->srp_iu_size);
812 vmem_alloc(xp->sc->srp_iu_arena, xp->srp_iu_size,
813 M_BESTFIT | M_NOWAIT, &xp->srp_iu_offset);
814 mad_adapter_info.buffer = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset + 24);
815 mad_adapter_info.tag = (uint64_t)xp;
816 memcpy((uint8_t *)xp->sc->srp_iu_queue + (uintptr_t)xp->srp_iu_offset,
817 &mad_adapter_info, sizeof(mad_adapter_info));
818 crq.valid = 0x80;
819 crq.format = 0x02;
820 crq.iu_data = htobe64(xp->sc->srp_iu_phys + xp->srp_iu_offset);
821 bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
822 phyp_hcall(H_SEND_CRQ, xp->sc->unit,
823 be64toh(((uint64_t *)(&crq))[0]),
824 be64toh(((uint64_t *)(&crq))[1]));
825
826 while (TAILQ_EMPTY(&sc->free_xferq))
827 vscsi_check_response_queue(sc);
828
829 /* Send SRP login */
830 vscsi_srp_login(sc);
831 while (sc->bus_logged_in == 0)
832 vscsi_check_response_queue(sc);
833
834 error = phyp_hcall(H_VIO_SIGNAL, sc->unit, 1); /* Enable interrupts */
835 }
836
837 static void
838 vscsi_intr(void *xsc)
839 {
840 struct vscsi_softc *sc = xsc;
841
842 mtx_lock(&sc->io_lock);
843 vscsi_check_response_queue(sc);
844 mtx_unlock(&sc->io_lock);
845 }
846
847 static void
848 vscsi_srp_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
849 {
850 union ccb *ccb = xp->ccb;
851 struct vscsi_softc *sc = xp->sc;
852 struct srp_rsp *rsp;
853 uint32_t sense_len;
854
855 /* SRP response packet in original request */
856 rsp = (struct srp_rsp *)((uint8_t *)sc->srp_iu_queue +
857 (uintptr_t)xp->srp_iu_offset);
858 ccb->csio.scsi_status = rsp->status;
859 if (ccb->csio.scsi_status == SCSI_STATUS_OK)
860 ccb->ccb_h.status = CAM_REQ_CMP;
861 else
862 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
863 #ifdef NOTYET
864 /* Collect fast fail codes */
865 if (crq->status != 0)
866 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
867 #endif
868
869 if (ccb->ccb_h.status != CAM_REQ_CMP) {
870 ccb->ccb_h.status |= CAM_DEV_QFRZN;
871 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
872 }
873
874 if (!(rsp->flags & SRP_RSPVALID))
875 rsp->response_data_len = 0;
876 if (!(rsp->flags & SRP_SNSVALID))
877 rsp->sense_data_len = 0;
878 if (!(rsp->flags & (SRP_DOOVER | SRP_DOUNDER)))
879 rsp->data_out_resid = 0;
880 if (!(rsp->flags & (SRP_DIOVER | SRP_DIUNDER)))
881 rsp->data_in_resid = 0;
882
883 if (rsp->flags & SRP_SNSVALID) {
884 bzero(&ccb->csio.sense_data, sizeof(struct scsi_sense_data));
885 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
886 sense_len = min(be32toh(rsp->sense_data_len),
887 ccb->csio.sense_len);
888 memcpy(&ccb->csio.sense_data,
889 &rsp->data_payload[be32toh(rsp->response_data_len)],
890 sense_len);
891 ccb->csio.sense_resid = ccb->csio.sense_len -
892 be32toh(rsp->sense_data_len);
893 }
894
895 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
896 case CAM_DIR_OUT:
		ccb->csio.resid = be32toh(rsp->data_out_resid);
898 break;
899 case CAM_DIR_IN:
		ccb->csio.resid = be32toh(rsp->data_in_resid);
901 break;
902 }
903
904 bus_dmamap_sync(sc->data_tag, xp->dmamap, BUS_DMASYNC_POSTREAD);
905 bus_dmamap_unload(sc->data_tag, xp->dmamap);
906 xpt_done(ccb);
907 xp->ccb = NULL;
908 }
909
910 static void
911 vscsi_login_response(struct vscsi_xfer *xp, struct vscsi_crq *crq)
912 {
913 struct vscsi_softc *sc = xp->sc;
914 struct srp_login_rsp *rsp;
915
916 /* SRP response packet in original request */
917 rsp = (struct srp_login_rsp *)((uint8_t *)sc->srp_iu_queue +
918 (uintptr_t)xp->srp_iu_offset);
	KASSERT(be16toh(rsp->buffer_formats) & 0x3, ("neither direct nor "
	    "indirect data buffers supported"));
921
922 sc->max_transactions = be32toh(rsp->request_limit_delta);
923 device_printf(sc->dev, "Queue depth %d commands\n",
924 sc->max_transactions);
925 sc->bus_logged_in = 1;
926 }
927
928 static void
929 vscsi_cam_poll(struct cam_sim *sim)
930 {
931 struct vscsi_softc *sc = cam_sim_softc(sim);
932
933 vscsi_check_response_queue(sc);
934 }
935
936 static void
937 vscsi_check_response_queue(struct vscsi_softc *sc)
938 {
939 struct vscsi_crq *crq;
940 struct vscsi_xfer *xp;
941 int code;
942
943 mtx_assert(&sc->io_lock, MA_OWNED);
944
945 while (sc->crq_queue[sc->cur_crq].valid != 0) {
946 /* The hypercalls at both ends of this are not optimal */
		phyp_hcall(H_VIO_SIGNAL, sc->unit, 0);	/* Disable interrupts */
948 bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_POSTREAD);
949
950 crq = &sc->crq_queue[sc->cur_crq];
951
952 switch (crq->valid) {
953 case 0xc0:
954 if (crq->format == 0x02)
955 sc->bus_initialized = 1;
956 break;
957 case 0x80:
958 /* IU data is set to tag pointer (the XP) */
959 xp = (struct vscsi_xfer *)crq->iu_data;
960
961 switch (crq->format) {
962 case 0x01:
963 code = *((uint8_t *)sc->srp_iu_queue +
964 (uintptr_t)xp->srp_iu_offset);
965 switch (code) {
966 case SRP_RSP:
967 vscsi_srp_response(xp, crq);
968 break;
969 case SRP_LOGIN_RSP:
970 vscsi_login_response(xp, crq);
971 break;
972 default:
973 device_printf(sc->dev, "Unknown SRP "
974 "response code %d\n", code);
975 break;
976 }
977 break;
978 case 0x02:
979 /* Ignore management datagrams */
980 break;
981 default:
982 panic("Unknown CRQ format %d\n", crq->format);
983 break;
984 }
985 vmem_free(sc->srp_iu_arena, xp->srp_iu_offset,
986 xp->srp_iu_size);
987 TAILQ_REMOVE(&sc->active_xferq, xp, queue);
988 TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
989 break;
990 default:
991 device_printf(sc->dev,
992 "Unknown CRQ message type %d\n", crq->valid);
993 break;
994 }
995
996 crq->valid = 0;
997 sc->cur_crq = (sc->cur_crq + 1) % sc->n_crqs;
998
999 bus_dmamap_sync(sc->crq_tag, sc->crq_map, BUS_DMASYNC_PREWRITE);
		phyp_hcall(H_VIO_SIGNAL, sc->unit, 1);	/* Re-enable interrupts */
1001 }
1002 }