/*-
 * Copyright (c) 2016 Netflix, Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_internal.h>	// Yes, this is wrong.
#include <cam/cam_debug.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "nvme_private.h"

#define ccb_accb_ptr	spriv_ptr0
#define ccb_ctrlr_ptr	spriv_ptr1

static void	nvme_sim_action(struct cam_sim *sim, union ccb *ccb);
static void	nvme_sim_poll(struct cam_sim *sim);

#define sim2softc(sim)	((struct nvme_sim_softc *)cam_sim_softc(sim))
#define sim2ns(sim)	(sim2softc(sim)->s_ns)
#define sim2ctrlr(sim)	(sim2softc(sim)->s_ctrlr)

struct nvme_sim_softc
{
	struct nvme_controller	*s_ctrlr;
	struct nvme_namespace	*s_ns;
	struct cam_sim		*s_sim;
	struct cam_path		*s_path;
};

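/*
 * Completion callback handed to each NVMe request; the core driver calls it
 * when the command finishes. It copies the completion entry into the CCB and
 * completes the CCB with a CAM status derived from the NVMe status.
 */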
static void
nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)
{
	union ccb *ccb = (union ccb *)ccb_arg;

	/*
	 * Let the periph know about the completion, and let it sort out
	 * what it means. Make our best guess for the status code, though.
	 */
	memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	if (nvme_completion_is_error(cpl)) {
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
	} else {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done_direct(ccb);
	}
}

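/*
 * Translate an XPT_NVME_IO or XPT_NVME_ADMIN CCB into an nvme_request,
 * treating the data pointer as a bio, a CCB (for S/G lists), a plain virtual
 * address, or no payload at all, and submit it to the appropriate controller
 * queue.
 */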
static void
nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_nvmeio *nvmeio = &ccb->nvmeio;
	struct nvme_request *req;
	void *payload;
	uint32_t size;
	struct nvme_controller *ctrlr;

	ctrlr = sim2ctrlr(sim);
	payload = nvmeio->data_ptr;
	size = nvmeio->dxfer_len;
	/* SG LIST ??? */
	if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = nvme_allocate_request_bio((struct bio *)payload,
		    nvme_sim_nvmeio_done, ccb);
	else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
		req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size,
		    nvme_sim_nvmeio_done, ccb);

	if (req == NULL) {
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));

	if (ccb->ccb_h.func_code == XPT_NVME_IO)
		nvme_ctrlr_submit_io_request(ctrlr, req);
	else
		nvme_ctrlr_submit_admin_request(ctrlr, req);
}

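/*
 * Estimate the controller's PCIe link bandwidth in kB/s from the negotiated
 * link speed and width reported in the PCIe link status register.
 */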
static uint32_t
nvme_link_kBps(struct nvme_controller *ctrlr)
{
	uint32_t speed, lanes, link[] = { 1, 250000, 500000, 985000, 1970000 };
	uint32_t status;

	status = pcie_read_config(ctrlr->dev, PCIER_LINK_STA, 2);
	speed = status & PCIEM_LINK_STA_SPEED;
	lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
	/*
	 * Failsafe on the link speed indicator. If it is insane, report the
	 * number of lanes as the speed. Not 100% accurate, but it may be
	 * diagnostic.
	 */
	if (speed >= nitems(link))
		speed = 0;
	return link[speed] * lanes;
}

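/*
 * The SIM action entry point: dispatch each CCB that the transport layer
 * hands us, either completing it inline or passing NVMe I/O and admin
 * commands on to nvme_sim_nvmeio().
 */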
static void
nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
{
	struct nvme_controller *ctrlr;
	struct nvme_namespace *ns;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("nvme_sim_action: func= %#x\n",
		ccb->ccb_h.func_code));

	/*
	 * XXX when we support multiple namespaces in the base driver we'll
	 * need to revisit how all this gets stored and saved in the periph
	 * driver's reserved areas. Right now we store all three in the softc
	 * of the sim.
	 */
	ns = sim2ns(sim);
	ctrlr = sim2ctrlr(sim);

	mtx_assert(&ctrlr->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_CALC_GEOMETRY:	/* Calculate Geometry Totally nuts ? XXX */
		/*
		 * Only meaningful for old-school SCSI disks since only the
		 * SCSI da driver generates them. Reject any that slip
		 * through.
		 */
		/*FALLTHROUGH*/
	case XPT_ABORT:			/* Abort the specified CCB */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
		/*
		 * NVMe doesn't really have different transfer settings, but
		 * other parts of CAM think failure here is a big deal.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		device_t dev = ctrlr->dev;

		/*
		 * NVMe may have multiple LUNs on the same path. The current
		 * generation of NVMe devices supports only a single
		 * namespace. Multiple-namespace drives are coming, but it's
		 * unclear how we should report them up the stack.
		 */
		cpi->version_num = 1;
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_UNMAPPED /* | PIM_NOSCAN */;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = ctrlr->cdata.nn;
		cpi->maxio = nvme_ns_get_max_io_xfer_size(ns);
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = nvme_link_kBps(ctrlr);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "NVMe", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_NVME;		/* XXX XPORT_PCIE ? */
		cpi->transport_version = nvme_mmio_read_4(ctrlr, vs);
		cpi->protocol = PROTO_NVME;
		cpi->protocol_version = nvme_mmio_read_4(ctrlr, vs);
		cpi->xport_specific.nvme.nsid = ns->id;
		cpi->xport_specific.nvme.domain = pci_get_domain(dev);
		cpi->xport_specific.nvme.bus = pci_get_bus(dev);
		cpi->xport_specific.nvme.slot = pci_get_slot(dev);
		cpi->xport_specific.nvme.function = pci_get_function(dev);
		cpi->xport_specific.nvme.extra = 0;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:	/* Get transport settings */
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_nvme *nvmep;
		struct ccb_trans_settings_nvme *nvmex;
		device_t dev;
		uint32_t status, caps;

		dev = ctrlr->dev;
		cts = &ccb->cts;
		nvmex = &cts->xport_specific.nvme;
		nvmep = &cts->proto_specific.nvme;

		status = pcie_read_config(dev, PCIER_LINK_STA, 2);
		caps = pcie_read_config(dev, PCIER_LINK_CAP, 2);
		nvmex->valid = CTS_NVME_VALID_SPEC | CTS_NVME_VALID_LINK;
		nvmex->spec = nvme_mmio_read_4(ctrlr, vs);
		nvmex->speed = status & PCIEM_LINK_STA_SPEED;
		nvmex->lanes = (status & PCIEM_LINK_STA_WIDTH) >> 4;
		nvmex->max_speed = caps & PCIEM_LINK_CAP_MAX_SPEED;
		nvmex->max_lanes = (caps & PCIEM_LINK_CAP_MAX_WIDTH) >> 4;

		/* XXX these should be something else maybe ? */
		nvmep->valid = 1;
		nvmep->spec = nvmex->spec;

		cts->transport = XPORT_NVME;
		cts->protocol = PROTO_NVME;
		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/*
		 * Every driver handles this, but nothing generates it. Assume
		 * it's OK to just say 'that worked'.
		 */
		/*FALLTHROUGH*/
	case XPT_RESET_DEV:		/* Bus Device Reset the specified device */
	case XPT_RESET_BUS:		/* Reset the specified bus */
		/*
		 * NVMe doesn't really support physically resetting the bus.
		 * It's part of the bus scanning dance, so return success to
		 * tell the process to proceed.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_NVME_IO:		/* Execute the requested I/O operation */
	case XPT_NVME_ADMIN:		/* or Admin operation */
		nvme_sim_nvmeio(sim, ccb);
		return;			/* no done */
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

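/*
 * The SIM poll callback: process any pending completions by polling the
 * controller directly.
 */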
static void
nvme_sim_poll(struct cam_sim *sim)
{

	nvme_ctrlr_poll(sim2ctrlr(sim));
}

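/*
 * Consumer callback for a newly attached controller: allocate a CAM devq
 * sized to the controller's queue depth, a per-controller softc, and a SIM.
 * The returned softc is later passed to the new-namespace callback.
 */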
static void *
nvme_sim_new_controller(struct nvme_controller *ctrlr)
{
	struct cam_devq *devq;
	int max_trans;
	int unit;
	struct nvme_sim_softc *sc = NULL;

	max_trans = ctrlr->max_hw_pend_io;
	unit = device_get_unit(ctrlr->dev);
	devq = cam_simq_alloc(max_trans);
	if (devq == NULL)
		return NULL;

	sc = malloc(sizeof(*sc), M_NVME, M_ZERO | M_WAITOK);

	sc->s_ctrlr = ctrlr;

	sc->s_sim = cam_sim_alloc(nvme_sim_action, nvme_sim_poll,
	    "nvme", sc, unit, &ctrlr->lock, max_trans, max_trans, devq);
	if (sc->s_sim == NULL) {
		printf("Failed to allocate a sim\n");
		cam_simq_free(devq);
		free(sc, M_NVME);
		return NULL;
	}

	return sc;
}

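/*
 * Kick off an asynchronous rescan of the given path so CAM discovers the
 * namespace and attaches a periph driver to it.
 */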
static void
nvme_sim_rescan_target(struct nvme_controller *ctrlr, struct cam_path *path)
{
	union ccb *ccb;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		printf("unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) {
		printf("unable to copy path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	xpt_rescan(ccb);
}

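/*
 * Consumer callback for a newly discovered namespace: register a CAM bus for
 * the SIM, create a path to the namespace, stash the identify data on the
 * CAM device, and trigger a rescan. On failure, unwind whatever has been set
 * up so far.
 */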
static void *
nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg)
{
	struct nvme_sim_softc *sc = sc_arg;
	struct nvme_controller *ctrlr = sc->s_ctrlr;
	int i;

	sc->s_ns = ns;

	/*
	 * XXX this is creating one bus per ns, but it should be one
	 * XXX target per controller, and one LUN per namespace.
	 * XXX Current drives only support one NS, so there's time
	 * XXX to fix it later when new drives arrive.
	 *
	 * XXX I'm pretty sure the xpt_bus_register() call below is
	 * XXX like super lame and it really belongs in the sim_new_ctrlr
	 * XXX callback. Then the create_path below would be pretty close
	 * XXX to being right. Except we should be per-ns not per-ctrlr
	 * XXX data.
	 */

	mtx_lock(&ctrlr->lock);
	/* Create bus */

	/*
	 * XXX do I need to lock ctrlr->lock ?
	 * XXX do I need to lock the path?
	 * The ata and scsi SIMs seem to in their code, but their discovery
	 * is somewhat more asynchronous. We're only ever called one at a
	 * time, and nothing runs in parallel.
	 */

	i = 0;
	if (xpt_bus_register(sc->s_sim, ctrlr->dev, 0) != CAM_SUCCESS)
		goto error;
	i++;
	if (xpt_create_path(&sc->s_path, /*periph*/NULL, cam_sim_path(sc->s_sim),
	    1, ns->id) != CAM_REQ_CMP)
		goto error;
	i++;

	sc->s_path->device->nvme_data = nvme_ns_get_data(ns);
	sc->s_path->device->nvme_cdata = nvme_ctrlr_get_data(ns->ctrlr);

	/* Scan bus */
	nvme_sim_rescan_target(ctrlr, sc->s_path);

	mtx_unlock(&ctrlr->lock);

	return ns;

error:
	/* Unwind in reverse order; the cascade through the cases is intentional. */
	switch (i) {
	case 2:
		xpt_free_path(sc->s_path);
		/*FALLTHROUGH*/
	case 1:
		xpt_bus_deregister(cam_sim_path(sc->s_sim));
		/*FALLTHROUGH*/
	case 0:
		cam_sim_free(sc->s_sim, /*free_devq*/TRUE);
	}
	mtx_unlock(&ctrlr->lock);
	return NULL;
}

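/*
 * Consumer callback invoked when a controller fails; teardown of the CAM
 * state is not implemented yet.
 */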
static void
nvme_sim_controller_fail(void *ctrlr_arg)
{
	/* XXX cleanup XXX */
}

struct nvme_consumer *consumer_cookie;

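/*
 * Register this module as an NVMe consumer at boot, unless nvme_use_nvd
 * indicates the nvd(4) front end should be used instead.
 */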
static void
nvme_sim_init(void)
{
	if (nvme_use_nvd)
		return;

	consumer_cookie = nvme_register_consumer(nvme_sim_new_ns,
	    nvme_sim_new_controller, NULL, nvme_sim_controller_fail);
}

SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
    nvme_sim_init, NULL);

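/*
 * Unregister the NVMe consumer on teardown; additional cleanup is still
 * outstanding (see the XXX below).
 */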
static void
nvme_sim_uninit(void)
{
	if (nvme_use_nvd)
		return;
	/* XXX Cleanup */

	nvme_unregister_consumer(consumer_cookie);
}

SYSUNINIT(nvme_sim_unregister, SI_SUB_DRIVERS, SI_ORDER_ANY,
    nvme_sim_uninit, NULL);