1 /*-
2 * Copyright (C) 2012-2013 Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/9.2/sys/dev/nvme/nvme_ctrlr.c 253627 2013-07-24 22:42:00Z jimharris $");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/buf.h>
33 #include <sys/bus.h>
34 #include <sys/conf.h>
35 #include <sys/ioccom.h>
36 #include <sys/proc.h>
37 #include <sys/smp.h>
38 #include <sys/uio.h>
39
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
42
43 #include "nvme_private.h"
44
45 static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
46 struct nvme_async_event_request *aer);
47
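/*
 * Map the controller's register BAR (BAR 0/1, or BAR 2/3 for Chatham) and,
 * if present, BAR 4/5 so that the MSI-X table can be used later.
 */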
48 static int
49 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
50 {
51
52 /* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
53 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
54 ctrlr->resource_id = PCIR_BAR(2);
55 else
56 ctrlr->resource_id = PCIR_BAR(0);
57
58 ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
59 &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);
60
61 if (ctrlr->resource == NULL) {
62 nvme_printf(ctrlr, "unable to allocate pci resource\n");
63 return (ENOMEM);
64 }
65
66 ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
67 ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
68 ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
69
70 /*
71 * The NVMe spec allows for the MSI-X table to be placed behind
72 * BAR 4/5, separate from the control/doorbell registers. Always
73 * try to map this BAR, because it must be mapped prior to calling
74 * pci_alloc_msix(). If the table isn't behind BAR 4/5,
75 * bus_alloc_resource() will just return NULL which is OK.
76 */
77 ctrlr->bar4_resource_id = PCIR_BAR(4);
78 ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
79 &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);
80
81 return (0);
82 }
83
84 #ifdef CHATHAM2
85 static int
86 nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
87 {
88
89 ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
90 ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
91 SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
92 RF_ACTIVE);
93
94 if (ctrlr->chatham_resource == NULL) {
95 nvme_printf(ctrlr, "unable to alloc pci resource\n");
96 return (ENOMEM);
97 }
98
99 ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
100 ctrlr->chatham_bus_handle =
101 rman_get_bushandle(ctrlr->chatham_resource);
102
103 return (0);
104 }
105
106 static void
107 nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
108 {
109 uint64_t reg1, reg2, reg3;
110 uint64_t temp1, temp2;
111 uint32_t temp3;
112 uint32_t use_flash_timings = 0;
113
114 DELAY(10000);
115
116 temp3 = chatham_read_4(ctrlr, 0x8080);
117
118 device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);
119
120 ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
121 ctrlr->chatham_size = ctrlr->chatham_lbas * 512;
122
123 device_printf(ctrlr->dev, "Chatham size: %jd\n",
124 (intmax_t)ctrlr->chatham_size);
125
126 reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;
127
128 TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
129 if (use_flash_timings) {
130 device_printf(ctrlr->dev, "Chatham: using flash timings\n");
131 temp1 = 0x00001b58000007d0LL;
132 temp2 = 0x000000cb00000131LL;
133 } else {
134 device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
135 temp1 = temp2 = 0x0LL;
136 }
137
138 chatham_write_8(ctrlr, 0x8000, reg1);
139 chatham_write_8(ctrlr, 0x8008, reg2);
140 chatham_write_8(ctrlr, 0x8010, reg3);
141
142 chatham_write_8(ctrlr, 0x8020, temp1);
143 temp3 = chatham_read_4(ctrlr, 0x8020);
144
145 chatham_write_8(ctrlr, 0x8028, temp2);
146 temp3 = chatham_read_4(ctrlr, 0x8028);
147
148 chatham_write_8(ctrlr, 0x8030, temp1);
149 chatham_write_8(ctrlr, 0x8038, temp2);
150 chatham_write_8(ctrlr, 0x8040, temp1);
151 chatham_write_8(ctrlr, 0x8048, temp2);
152 chatham_write_8(ctrlr, 0x8050, temp1);
153 chatham_write_8(ctrlr, 0x8058, temp2);
154
155 DELAY(10000);
156 }
157
158 static void
159 nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
160 {
161 struct nvme_controller_data *cdata;
162
163 cdata = &ctrlr->cdata;
164
165 cdata->vid = 0x8086;
166 cdata->ssvid = 0x2011;
167
168 /*
169 * Chatham2 puts garbage data in these fields when we
170 * invoke IDENTIFY_CONTROLLER, so we need to re-zero
171 * the fields before calling memcpy().
172 */
173 memset(cdata->sn, 0, sizeof(cdata->sn));
174 memcpy(cdata->sn, "2012", strlen("2012"));
175 memset(cdata->mn, 0, sizeof(cdata->mn));
176 memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
177 memset(cdata->fr, 0, sizeof(cdata->fr));
178 memcpy(cdata->fr, "", strlen(""));
179 cdata->rab = 8;
180 cdata->aerl = 3;
181 cdata->lpa.ns_smart = 1;
182 cdata->sqes.min = 6;
183 cdata->sqes.max = 6;
184 cdata->cqes.min = 4;
185 cdata->cqes.max = 4;
186 cdata->nn = 1;
187
188 /* Chatham2 doesn't support DSM command */
189 cdata->oncs.dsm = 0;
190
191 cdata->vwc.present = 1;
192 }
193 #endif /* CHATHAM2 */
194
195 static void
196 nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
197 {
198 struct nvme_qpair *qpair;
199 uint32_t num_entries;
200
201 qpair = &ctrlr->adminq;
202
203 num_entries = NVME_ADMIN_ENTRIES;
204 TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
205 /*
206 * If admin_entries was overridden to an invalid value, revert it
207 * to our default value.
208 */
209 if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
210 num_entries > NVME_MAX_ADMIN_ENTRIES) {
211 nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
212 "specified\n", num_entries);
213 num_entries = NVME_ADMIN_ENTRIES;
214 }
215
216 /*
217 * The admin queue's max xfer size is treated differently than the
218 * max I/O xfer size. 16KB is sufficient here - maybe even less?
219 */
220 nvme_qpair_construct(qpair,
221 0, /* qpair ID */
222 0, /* vector */
223 num_entries,
224 NVME_ADMIN_TRACKERS,
225 ctrlr);
226 }
227
228 static int
229 nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
230 {
231 struct nvme_qpair *qpair;
232 union cap_lo_register cap_lo;
233 int i, num_entries, num_trackers;
234
235 num_entries = NVME_IO_ENTRIES;
236 TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
237
238 /*
239 * NVMe spec sets a hard limit of 64K max entries, but
240 * devices may specify a smaller limit, so we need to check
241 * the MQES field in the capabilities register.
242 */
243 cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
244 num_entries = min(num_entries, cap_lo.bits.mqes+1);
245
246 num_trackers = NVME_IO_TRACKERS;
247 TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
248
249 num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
250 num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
251 /*
252 * No need to have more trackers than entries in the submit queue.
253 * Note also that for a queue size of N, we can only have (N-1)
254 * commands outstanding, hence the "-1" here.
255 */
256 num_trackers = min(num_trackers, (num_entries-1));
257
258 ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
259 M_NVME, M_ZERO | M_WAITOK);
260
261 for (i = 0; i < ctrlr->num_io_queues; i++) {
262 qpair = &ctrlr->ioq[i];
263
264 /*
265 * Admin queue has ID=0. IO queues start at ID=1 -
266 * hence the 'i+1' here.
267 *
268 * For I/O queues, use the controller-wide max_xfer_size
269 * calculated in nvme_attach().
270 */
271 nvme_qpair_construct(qpair,
272 i+1, /* qpair ID */
273 ctrlr->msix_enabled ? i+1 : 0, /* vector */
274 num_entries,
275 num_trackers,
276 ctrlr);
277
278 if (ctrlr->per_cpu_io_queues)
279 bus_bind_intr(ctrlr->dev, qpair->res, i);
280 }
281
282 return (0);
283 }
284
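/*
 * Mark the controller as failed, fail all requests on the admin and I/O
 * queues, and notify any registered consumers of the failure.
 */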
285 static void
286 nvme_ctrlr_fail(struct nvme_controller *ctrlr)
287 {
288 int i;
289
290 ctrlr->is_failed = TRUE;
291 nvme_qpair_fail(&ctrlr->adminq);
292 for (i = 0; i < ctrlr->num_io_queues; i++)
293 nvme_qpair_fail(&ctrlr->ioq[i]);
294 nvme_notify_fail_consumers(ctrlr);
295 }
296
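/*
 * Queue a request on the controller's failed-request list and schedule the
 * taskqueue to complete it with an "aborted by request" status.
 */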
297 void
298 nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
299 struct nvme_request *req)
300 {
301
302 mtx_lock(&ctrlr->lock);
303 STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
304 mtx_unlock(&ctrlr->lock);
305 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
306 }
307
308 static void
309 nvme_ctrlr_fail_req_task(void *arg, int pending)
310 {
311 struct nvme_controller *ctrlr = arg;
312 struct nvme_request *req;
313
314 mtx_lock(&ctrlr->lock);
315 while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
316 req = STAILQ_FIRST(&ctrlr->fail_req);
317 STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
318 nvme_qpair_manual_complete_request(req->qpair, req,
319 NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
320 }
321 mtx_unlock(&ctrlr->lock);
322 }
323
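/*
 * Poll CSTS.RDY until the controller reports ready, giving up with ENXIO
 * after the controller's advertised ready timeout has elapsed.
 */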
324 static int
325 nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
326 {
327 int ms_waited;
328 union cc_register cc;
329 union csts_register csts;
330
331 cc.raw = nvme_mmio_read_4(ctrlr, cc);
332 csts.raw = nvme_mmio_read_4(ctrlr, csts);
333
334 if (!cc.bits.en) {
335 nvme_printf(ctrlr, "%s called with cc.en = 0\n", __func__);
336 return (ENXIO);
337 }
338
339 ms_waited = 0;
340
341 while (!csts.bits.rdy) {
342 DELAY(1000);
343 if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
344 nvme_printf(ctrlr, "controller did not become ready "
345 "within %d ms\n", ctrlr->ready_timeout_in_ms);
346 return (ENXIO);
347 }
348 csts.raw = nvme_mmio_read_4(ctrlr, csts);
349 }
350
351 return (0);
352 }
353
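/*
 * Clear CC.EN to disable the controller, first waiting for CSTS.RDY if an
 * enable is still in progress.
 */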
354 static void
355 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
356 {
357 union cc_register cc;
358 union csts_register csts;
359
360 cc.raw = nvme_mmio_read_4(ctrlr, cc);
361 csts.raw = nvme_mmio_read_4(ctrlr, csts);
362
363 if (cc.bits.en == 1 && csts.bits.rdy == 0)
364 nvme_ctrlr_wait_for_ready(ctrlr);
365
366 cc.bits.en = 0;
367 nvme_mmio_write_4(ctrlr, cc, cc.raw);
368 DELAY(5000);
369 }
370
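/*
 * Program the admin queue registers (ASQ, ACQ, AQA) and the configuration
 * register, set CC.EN, and wait for the controller to report ready.
 */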
371 static int
372 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
373 {
374 union cc_register cc;
375 union csts_register csts;
376 union aqa_register aqa;
377
378 cc.raw = nvme_mmio_read_4(ctrlr, cc);
379 csts.raw = nvme_mmio_read_4(ctrlr, csts);
380
381 if (cc.bits.en == 1) {
382 if (csts.bits.rdy == 1)
383 return (0);
384 else
385 return (nvme_ctrlr_wait_for_ready(ctrlr));
386 }
387
388 nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
389 DELAY(5000);
390 nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
391 DELAY(5000);
392
393 aqa.raw = 0;
394 /* acqs and asqs are 0-based. */
395 aqa.bits.acqs = ctrlr->adminq.num_entries-1;
396 aqa.bits.asqs = ctrlr->adminq.num_entries-1;
397 nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
398 DELAY(5000);
399
400 cc.bits.en = 1;
401 cc.bits.css = 0;
402 cc.bits.ams = 0;
403 cc.bits.shn = 0;
404 cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
405 cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
406
407 /* With the base 4KB PAGE_SIZE this evaluates to 0, per the spec. */
408 cc.bits.mps = (PAGE_SIZE >> 13);
409
410 nvme_mmio_write_4(ctrlr, cc, cc.raw);
411 DELAY(5000);
412
413 return (nvme_ctrlr_wait_for_ready(ctrlr));
414 }
415
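/*
 * Disable the admin and I/O queue pairs, then take the controller through a
 * full disable/enable cycle.
 */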
416 int
417 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
418 {
419 int i;
420
421 nvme_admin_qpair_disable(&ctrlr->adminq);
422 for (i = 0; i < ctrlr->num_io_queues; i++)
423 nvme_io_qpair_disable(&ctrlr->ioq[i]);
424
425 DELAY(100*1000);
426
427 nvme_ctrlr_disable(ctrlr);
428 return (nvme_ctrlr_enable(ctrlr));
429 }
430
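/*
 * Schedule an asynchronous controller reset on the taskqueue, unless a reset
 * is already in progress or the controller has already been failed.
 */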
431 void
432 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
433 {
434 int cmpset;
435
436 cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
437
438 if (cmpset == 0 || ctrlr->is_failed)
439 /*
440 * Controller is already resetting or has failed. Return
441 * immediately since there is no need to kick off another
442 * reset in these cases.
443 */
444 return;
445
446 taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
447 }
448
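/*
 * Issue IDENTIFY CONTROLLER and, based on the reported MDTS, clamp the
 * controller's max_xfer_size.
 */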
449 static int
450 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
451 {
452 struct nvme_completion_poll_status status;
453
454 status.done = FALSE;
455 nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
456 nvme_completion_poll_cb, &status);
457 while (status.done == FALSE)
458 pause("nvme", 1);
459 if (nvme_completion_is_error(&status.cpl)) {
460 nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
461 return (ENXIO);
462 }
463
464 #ifdef CHATHAM2
465 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
466 nvme_chatham_populate_cdata(ctrlr);
467 #endif
468
469 /*
470 * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
471 * controller supports.
472 */
473 if (ctrlr->cdata.mdts > 0)
474 ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
475 ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
476
477 return (0);
478 }
479
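/*
 * Ask the controller for the desired number of I/O queue pairs, falling back
 * to a single queue pair if fewer were granted than requested.
 */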
480 static int
481 nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
482 {
483 struct nvme_completion_poll_status status;
484 int cq_allocated, i, sq_allocated;
485
486 status.done = FALSE;
487 nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
488 nvme_completion_poll_cb, &status);
489 while (status.done == FALSE)
490 pause("nvme", 1);
491 if (nvme_completion_is_error(&status.cpl)) {
492 nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
493 return (ENXIO);
494 }
495
496 /*
497 * Data in cdw0 is 0-based.
498 * Lower 16-bits indicate number of submission queues allocated.
499 * Upper 16-bits indicate number of completion queues allocated.
500 */
501 sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
502 cq_allocated = (status.cpl.cdw0 >> 16) + 1;
503
504 /*
505 * Check that the controller was able to allocate the number of
506 * queues we requested. If not, revert to one IO queue pair.
507 */
508 if (sq_allocated < ctrlr->num_io_queues ||
509 cq_allocated < ctrlr->num_io_queues) {
510
511 /*
512 * Destroy extra IO queue pairs that were created at
513 * controller construction time but are no longer
514 * needed. This will only happen when a controller
515 * supports fewer queues than MSI-X vectors. This
516 * is not the normal case, but does occur with the
517 * Chatham prototype board.
518 */
519 for (i = 1; i < ctrlr->num_io_queues; i++)
520 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
521
522 ctrlr->num_io_queues = 1;
523 ctrlr->per_cpu_io_queues = 0;
524 }
525
526 return (0);
527 }
528
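/* Create the I/O completion and submission queues on the controller. */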
529 static int
530 nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
531 {
532 struct nvme_completion_poll_status status;
533 struct nvme_qpair *qpair;
534 int i;
535
536 for (i = 0; i < ctrlr->num_io_queues; i++) {
537 qpair = &ctrlr->ioq[i];
538
539 status.done = FALSE;
540 nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
541 nvme_completion_poll_cb, &status);
542 while (status.done == FALSE)
543 pause("nvme", 1);
544 if (nvme_completion_is_error(&status.cpl)) {
545 nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
546 return (ENXIO);
547 }
548
549 status.done = FALSE;
550 nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
551 nvme_completion_poll_cb, &status);
552 while (status.done == FALSE)
553 pause("nvme", 1);
554 if (nvme_completion_is_error(&status.cpl)) {
555 nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
556 return (ENXIO);
557 }
558 }
559
560 return (0);
561 }
562
563 static int
564 nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
565 {
566 struct nvme_namespace *ns;
567 int i, status;
568
569 for (i = 0; i < ctrlr->cdata.nn; i++) {
570 ns = &ctrlr->ns[i];
571 status = nvme_ns_construct(ns, i+1, ctrlr);
572 if (status != 0)
573 return (status);
574 }
575
576 return (0);
577 }
578
579 static boolean_t
580 is_log_page_id_valid(uint8_t page_id)
581 {
582
583 switch (page_id) {
584 case NVME_LOG_ERROR:
585 case NVME_LOG_HEALTH_INFORMATION:
586 case NVME_LOG_FIRMWARE_SLOT:
587 return (TRUE);
588 }
589
590 return (FALSE);
591 }
592
593 static uint32_t
594 nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
595 {
596 uint32_t log_page_size;
597
598 switch (page_id) {
599 case NVME_LOG_ERROR:
600 log_page_size = min(
601 sizeof(struct nvme_error_information_entry) *
602 ctrlr->cdata.elpe,
603 NVME_MAX_AER_LOG_SIZE);
604 break;
605 case NVME_LOG_HEALTH_INFORMATION:
606 log_page_size = sizeof(struct nvme_health_information_page);
607 break;
608 case NVME_LOG_FIRMWARE_SLOT:
609 log_page_size = sizeof(struct nvme_firmware_page);
610 break;
611 default:
612 log_page_size = 0;
613 break;
614 }
615
616 return (log_page_size);
617 }
618
619 static void
620 nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
621 {
622 struct nvme_async_event_request *aer = arg;
623
624 /*
625 * If the log page fetch for some reason completed with an error,
626 * don't pass log page data to the consumers. In practice, this case
627 * should never happen.
628 */
629 if (nvme_completion_is_error(cpl))
630 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
631 aer->log_page_id, NULL, 0);
632 else
633 /*
634 * Pass the cpl data from the original async event completion,
635 * not the log page fetch.
636 */
637 nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
638 aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
639
640 /*
641 * Repost another asynchronous event request to replace the one
642 * that just completed.
643 */
644 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
645 }
646
647 static void
648 nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
649 {
650 struct nvme_async_event_request *aer = arg;
651
652 if (nvme_completion_is_error(cpl)) {
653 /*
654 * Do not retry failed async event requests. This avoids
655 * infinite loops where a new async event request is submitted
656 * to replace the one just failed, only to fail again and
657 * perpetuate the loop.
658 */
659 return;
660 }
661
662 /* Associated log page is in bits 23:16 of completion entry dw0. */
663 aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
664
665 nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
666 aer->log_page_id);
667
668 if (is_log_page_id_valid(aer->log_page_id)) {
669 aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
670 aer->log_page_id);
671 memcpy(&aer->cpl, cpl, sizeof(*cpl));
672 nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
673 NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
674 aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
675 aer);
676 /* Wait to notify consumers until after log page is fetched. */
677 } else {
678 nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
679 NULL, 0);
680
681 /*
682 * Repost another asynchronous event request to replace the one
683 * that just completed.
684 */
685 nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
686 }
687 }
688
689 static void
690 nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
691 struct nvme_async_event_request *aer)
692 {
693 struct nvme_request *req;
694
695 aer->ctrlr = ctrlr;
696 req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
697 aer->req = req;
698
699 /*
700 * Disable timeout here, since asynchronous event requests should by
701 * nature never be timed out.
702 */
703 req->timeout = FALSE;
704 req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
705 nvme_ctrlr_submit_admin_request(ctrlr, req);
706 }
707
708 static void
709 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
710 {
711 union nvme_critical_warning_state state;
712 struct nvme_async_event_request *aer;
713 uint32_t i;
714
715 state.raw = 0xFF;
716 state.bits.reserved = 0;
717 nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, NULL, NULL);
718
719 /* aerl is a zero-based value, so we need to add 1 here. */
720 ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
721
722 /* Chatham doesn't support AERs. */
723 if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
724 ctrlr->num_aers = 0;
725
726 for (i = 0; i < ctrlr->num_aers; i++) {
727 aer = &ctrlr->aer[i];
728 nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
729 }
730 }
731
732 static void
733 nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
734 {
735
736 ctrlr->int_coal_time = 0;
737 TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
738 &ctrlr->int_coal_time);
739
740 ctrlr->int_coal_threshold = 0;
741 TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
742 &ctrlr->int_coal_threshold);
743
744 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
745 ctrlr->int_coal_threshold, NULL, NULL);
746 }
747
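/*
 * Bring the controller online: reset and enable the queue pairs, identify
 * the controller, set up the I/O queues and namespaces, and configure
 * asynchronous events and interrupt coalescing.
 */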
748 static void
749 nvme_ctrlr_start(void *ctrlr_arg)
750 {
751 struct nvme_controller *ctrlr = ctrlr_arg;
752 int i;
753
754 nvme_qpair_reset(&ctrlr->adminq);
755 for (i = 0; i < ctrlr->num_io_queues; i++)
756 nvme_qpair_reset(&ctrlr->ioq[i]);
757
758 nvme_admin_qpair_enable(&ctrlr->adminq);
759
760 if (nvme_ctrlr_identify(ctrlr) != 0) {
761 nvme_ctrlr_fail(ctrlr);
762 return;
763 }
764
765 if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
766 nvme_ctrlr_fail(ctrlr);
767 return;
768 }
769
770 if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
771 nvme_ctrlr_fail(ctrlr);
772 return;
773 }
774
775 if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
776 nvme_ctrlr_fail(ctrlr);
777 return;
778 }
779
780 nvme_ctrlr_configure_aer(ctrlr);
781 nvme_ctrlr_configure_int_coalescing(ctrlr);
782
783 for (i = 0; i < ctrlr->num_io_queues; i++)
784 nvme_io_qpair_enable(&ctrlr->ioq[i]);
785
786 /*
787 * Clear software progress marker to 0, to indicate to pre-boot
788 * software that OS driver load was successful.
789 *
790 * Chatham does not support this feature.
791 */
792 if (pci_get_devid(ctrlr->dev) != CHATHAM_PCI_ID)
793 nvme_ctrlr_cmd_set_feature(ctrlr,
794 NVME_FEAT_SOFTWARE_PROGRESS_MARKER, 0, NULL, 0, NULL, NULL);
795 }
796
797 void
798 nvme_ctrlr_start_config_hook(void *arg)
799 {
800 struct nvme_controller *ctrlr = arg;
801
802 nvme_ctrlr_start(ctrlr);
803 config_intrhook_disestablish(&ctrlr->config_hook);
804 }
805
806 static void
807 nvme_ctrlr_reset_task(void *arg, int pending)
808 {
809 struct nvme_controller *ctrlr = arg;
810 int status;
811
812 nvme_printf(ctrlr, "resetting controller\n");
813 status = nvme_ctrlr_hw_reset(ctrlr);
814 /*
815 * Use pause instead of DELAY, so that we yield to any nvme interrupt
816 * handlers on this CPU that were blocked on a qpair lock. We want
817 * all nvme interrupts completed before proceeding with restarting the
818 * controller.
819 *
820 * XXX - any way to guarantee the interrupt handlers have quiesced?
821 */
822 pause("nvmereset", hz / 10);
823 if (status == 0)
824 nvme_ctrlr_start(ctrlr);
825 else
826 nvme_ctrlr_fail(ctrlr);
827
828 atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
829 }
830
831 static void
832 nvme_ctrlr_intx_handler(void *arg)
833 {
834 struct nvme_controller *ctrlr = arg;
835
836 nvme_mmio_write_4(ctrlr, intms, 1);
837
838 nvme_qpair_process_completions(&ctrlr->adminq);
839
840 if (ctrlr->ioq[0].cpl)
841 nvme_qpair_process_completions(&ctrlr->ioq[0]);
842
843 nvme_mmio_write_4(ctrlr, intmc, 1);
844 }
845
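/* Fall back to a single shared legacy (INTx) interrupt and one I/O queue. */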
846 static int
847 nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
848 {
849
850 ctrlr->num_io_queues = 1;
851 ctrlr->per_cpu_io_queues = 0;
852 ctrlr->rid = 0;
853 ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
854 &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
855
856 if (ctrlr->res == NULL) {
857 nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
858 return (ENOMEM);
859 }
860
861 bus_setup_intr(ctrlr->dev, ctrlr->res,
862 INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
863 ctrlr, &ctrlr->tag);
864
865 if (ctrlr->tag == NULL) {
866 nvme_printf(ctrlr, "unable to setup intx handler\n");
867 return (ENOMEM);
868 }
869
870 return (0);
871 }
872
873 static void
874 nvme_pt_done(void *arg, const struct nvme_completion *cpl)
875 {
876 struct nvme_pt_command *pt = arg;
877
878 bzero(&pt->cpl, sizeof(pt->cpl));
879 pt->cpl.cdw0 = cpl->cdw0;
880 pt->cpl.status = cpl->status;
881 pt->cpl.status.p = 0;
882
883 mtx_lock(pt->driver_lock);
884 wakeup(pt);
885 mtx_unlock(pt->driver_lock);
886 }
887
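/*
 * Build and submit a passthrough command, wiring the user buffer (if any)
 * and sleeping until the command completes.
 */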
888 int
889 nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
890 struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
891 int is_admin_cmd)
892 {
893 struct nvme_request *req;
894 struct mtx *mtx;
895 struct buf *buf = NULL;
896 int ret = 0;
897
898 if (pt->len > 0) {
899 if (pt->len > ctrlr->max_xfer_size) {
900 nvme_printf(ctrlr, "pt->len (%d) "
901 "exceeds max_xfer_size (%d)\n", pt->len,
902 ctrlr->max_xfer_size);
903 return (EIO);
904 }
905 if (is_user_buffer) {
906 /*
907 * Ensure the user buffer is wired for the duration of
908 * this passthrough command.
909 */
910 PHOLD(curproc);
911 buf = getpbuf(NULL);
912 buf->b_saveaddr = buf->b_data;
913 buf->b_data = pt->buf;
914 buf->b_bufsize = pt->len;
915 buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
916 #ifdef NVME_UNMAPPED_BIO_SUPPORT
917 if (vmapbuf(buf, 1) < 0) {
918 #else
919 if (vmapbuf(buf) < 0) {
920 #endif
921 ret = EFAULT;
922 goto err;
923 }
924 req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
925 nvme_pt_done, pt);
926 } else
927 req = nvme_allocate_request_vaddr(pt->buf, pt->len,
928 nvme_pt_done, pt);
929 } else
930 req = nvme_allocate_request_null(nvme_pt_done, pt);
931
932 req->cmd.opc = pt->cmd.opc;
933 req->cmd.cdw10 = pt->cmd.cdw10;
934 req->cmd.cdw11 = pt->cmd.cdw11;
935 req->cmd.cdw12 = pt->cmd.cdw12;
936 req->cmd.cdw13 = pt->cmd.cdw13;
937 req->cmd.cdw14 = pt->cmd.cdw14;
938 req->cmd.cdw15 = pt->cmd.cdw15;
939
940 req->cmd.nsid = nsid;
941
942 if (is_admin_cmd)
943 mtx = &ctrlr->lock;
944 else
945 mtx = &ctrlr->ns[nsid-1].lock;
946
947 mtx_lock(mtx);
948 pt->driver_lock = mtx;
949
950 if (is_admin_cmd)
951 nvme_ctrlr_submit_admin_request(ctrlr, req);
952 else
953 nvme_ctrlr_submit_io_request(ctrlr, req);
954
955 mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
956 mtx_unlock(mtx);
957
958 pt->driver_lock = NULL;
959
960 err:
961 if (buf != NULL) {
962 relpbuf(buf, NULL);
963 PRELE(curproc);
964 }
965
966 return (ret);
967 }
968
969 static int
970 nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
971 struct thread *td)
972 {
973 struct nvme_controller *ctrlr;
974 struct nvme_pt_command *pt;
975
976 ctrlr = cdev->si_drv1;
977
978 switch (cmd) {
979 case NVME_RESET_CONTROLLER:
980 nvme_ctrlr_reset(ctrlr);
981 break;
982 case NVME_PASSTHROUGH_CMD:
983 pt = (struct nvme_pt_command *)arg;
984 return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
985 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
986 default:
987 return (ENOTTY);
988 }
989
990 return (0);
991 }
992
993 static struct cdevsw nvme_ctrlr_cdevsw = {
994 .d_version = D_VERSION,
995 .d_flags = 0,
996 .d_ioctl = nvme_ctrlr_ioctl
997 };
998
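/*
 * One-time controller initialization: map the register BARs, read the
 * capability registers, set up MSI-X or INTx interrupts, construct the admin
 * and I/O queue pairs, and create the character device and taskqueue.
 */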
999 int
1000 nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
1001 {
1002 union cap_lo_register cap_lo;
1003 union cap_hi_register cap_hi;
1004 int num_vectors, per_cpu_io_queues, status = 0;
1005 int timeout_period;
1006
1007 ctrlr->dev = dev;
1008
1009 mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
1010
1011 status = nvme_ctrlr_allocate_bar(ctrlr);
1012
1013 if (status != 0)
1014 return (status);
1015
1016 #ifdef CHATHAM2
1017 if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
1018 status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
1019 if (status != 0)
1020 return (status);
1021 nvme_ctrlr_setup_chatham(ctrlr);
1022 }
1023 #endif
1024
1025 /*
1026 * Software emulators may set the doorbell stride to something
1027 * other than zero, but this driver is not set up to handle that.
1028 */
1029 cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
1030 if (cap_hi.bits.dstrd != 0)
1031 return (ENXIO);
1032
1033 ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
1034
1035 /* Get ready timeout value from controller, in units of 500ms. */
1036 cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
1037 ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;
1038
1039 timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
1040 TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
1041 timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
1042 timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
1043 ctrlr->timeout_period = timeout_period;
1044
1045 nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
1046 TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
1047
1048 per_cpu_io_queues = 1;
1049 TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
1050 ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;
1051
1052 if (ctrlr->per_cpu_io_queues)
1053 ctrlr->num_io_queues = mp_ncpus;
1054 else
1055 ctrlr->num_io_queues = 1;
1056
1057 ctrlr->force_intx = 0;
1058 TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
1059
1060 ctrlr->enable_aborts = 0;
1061 TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
1062
1063 ctrlr->msix_enabled = 1;
1064
1065 if (ctrlr->force_intx) {
1066 ctrlr->msix_enabled = 0;
1067 goto intx;
1068 }
1069
1070 /* One vector per IO queue, plus one vector for admin queue. */
1071 num_vectors = ctrlr->num_io_queues + 1;
1072
1073 if (pci_msix_count(dev) < num_vectors) {
1074 ctrlr->msix_enabled = 0;
1075 goto intx;
1076 }
1077
1078 if (pci_alloc_msix(dev, &num_vectors) != 0)
1079 ctrlr->msix_enabled = 0;
1080
1081 intx:
1082
1083 if (!ctrlr->msix_enabled)
1084 nvme_ctrlr_configure_intx(ctrlr);
1085
1086 ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
1087 nvme_ctrlr_construct_admin_qpair(ctrlr);
1088 status = nvme_ctrlr_construct_io_qpairs(ctrlr);
1089
1090 if (status != 0)
1091 return (status);
1092
1093 ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
1094 "nvme%d", device_get_unit(dev));
1095
1096 if (ctrlr->cdev == NULL)
1097 return (ENXIO);
1098
1099 ctrlr->cdev->si_drv1 = (void *)ctrlr;
1100
1101 ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
1102 taskqueue_thread_enqueue, &ctrlr->taskqueue);
1103 taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
1104
1105 ctrlr->is_resetting = 0;
1106 TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
1107
1108 TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
1109 STAILQ_INIT(&ctrlr->fail_req);
1110 ctrlr->is_failed = FALSE;
1111
1112 return (0);
1113 }
1114
1115 void
1116 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
1117 {
1118 int i;
1119
1120 nvme_ctrlr_disable(ctrlr);
1121 taskqueue_free(ctrlr->taskqueue);
1122
1123 for (i = 0; i < NVME_MAX_NAMESPACES; i++)
1124 nvme_ns_destruct(&ctrlr->ns[i]);
1125
1126 if (ctrlr->cdev)
1127 destroy_dev(ctrlr->cdev);
1128
1129 for (i = 0; i < ctrlr->num_io_queues; i++) {
1130 nvme_io_qpair_destroy(&ctrlr->ioq[i]);
1131 }
1132
1133 free(ctrlr->ioq, M_NVME);
1134
1135 nvme_admin_qpair_destroy(&ctrlr->adminq);
1136
1137 if (ctrlr->resource != NULL) {
1138 bus_release_resource(dev, SYS_RES_MEMORY,
1139 ctrlr->resource_id, ctrlr->resource);
1140 }
1141
1142 if (ctrlr->bar4_resource != NULL) {
1143 bus_release_resource(dev, SYS_RES_MEMORY,
1144 ctrlr->bar4_resource_id, ctrlr->bar4_resource);
1145 }
1146
1147 #ifdef CHATHAM2
1148 if (ctrlr->chatham_resource != NULL) {
1149 bus_release_resource(dev, SYS_RES_MEMORY,
1150 ctrlr->chatham_resource_id, ctrlr->chatham_resource);
1151 }
1152 #endif
1153
1154 if (ctrlr->tag)
1155 bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
1156
1157 if (ctrlr->res)
1158 bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
1159 rman_get_rid(ctrlr->res), ctrlr->res);
1160
1161 if (ctrlr->msix_enabled)
1162 pci_release_msi(dev);
1163 }
1164
1165 void
1166 nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
1167 struct nvme_request *req)
1168 {
1169
1170 nvme_qpair_submit_request(&ctrlr->adminq, req);
1171 }
1172
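/*
 * Submit an I/O request on the current CPU's queue pair when per-CPU I/O
 * queues are in use, otherwise on the single shared I/O queue pair.
 */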
1173 void
1174 nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
1175 struct nvme_request *req)
1176 {
1177 struct nvme_qpair *qpair;
1178
1179 if (ctrlr->per_cpu_io_queues)
1180 qpair = &ctrlr->ioq[curcpu];
1181 else
1182 qpair = &ctrlr->ioq[0];
1183
1184 nvme_qpair_submit_request(qpair, req);
1185 }
1186
1187 device_t
1188 nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
1189 {
1190
1191 return (ctrlr->dev);
1192 }
1193
1194 const struct nvme_controller_data *
1195 nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
1196 {
1197
1198 return (&ctrlr->cdata);
1199 }