/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __NVME_PRIVATE_H__
#define __NVME_PRIVATE_H__

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/uma.h>

#include <machine/bus.h>

#include "nvme.h"

#define DEVICE2SOFTC(dev)	((struct nvme_controller *) device_get_softc(dev))
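
/*
 * Usage sketch (illustrative only, not part of the driver): inside a newbus
 * method, the controller softc is recovered from the device_t, e.g.
 *
 *	struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
 */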

MALLOC_DECLARE(M_NVME);

#define IDT32_PCI_ID		0x80d0111d	/* 32 channel board */
#define IDT8_PCI_ID		0x80d2111d	/* 8 channel board */

#define NVME_ADMIN_TRACKERS	(16)
#define NVME_ADMIN_ENTRIES	(128)
/*
 * The minimum and maximum admin queue sizes are defined in the admin queue
 * attributes section of the spec.
 */
#define NVME_MIN_ADMIN_ENTRIES	(2)
#define NVME_MAX_ADMIN_ENTRIES	(4096)

/*
 * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
 * queues, while NVME_IO_TRACKERS defines the maximum number of I/O requests
 * that we will allow to be outstanding on an I/O qpair at any time.  The only
 * advantage of having IO_ENTRIES > IO_TRACKERS is for debugging purposes -
 * when dumping the contents of the submission and completion queues, it will
 * show a longer history of data.
 */
#define NVME_IO_ENTRIES		(256)
#define NVME_IO_TRACKERS	(128)
#define NVME_MIN_IO_TRACKERS	(4)
#define NVME_MAX_IO_TRACKERS	(1024)

/*
 * NVME_MAX_IO_ENTRIES is not defined, since it is specified by each
 * controller in its CAP.MQES register field.
 */

#define NVME_INT_COAL_TIME	(0)	/* disabled */
#define NVME_INT_COAL_THRESHOLD (0)	/* 0-based */

#define NVME_MAX_NAMESPACES	(16)
#define NVME_MAX_CONSUMERS	(2)
#define NVME_MAX_ASYNC_EVENTS	(8)

#define NVME_DEFAULT_TIMEOUT_PERIOD	(30)	/* in seconds */
#define NVME_MIN_TIMEOUT_PERIOD		(5)
#define NVME_MAX_TIMEOUT_PERIOD		(120)

#define NVME_DEFAULT_RETRY_COUNT	(4)

/* Maximum log page size to fetch for AERs. */
#define NVME_MAX_AER_LOG_SIZE		(4096)

/*
 * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
 * it.
 */
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE		(64)
#endif

#define NVME_GONE		0xfffffffful
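
/*
 * Usage sketch (illustrative only): reads from unmapped MMIO return all-ones,
 * so the timeout path compares a CSTS read against NVME_GONE to detect a
 * hot-removed controller:
 *
 *	csts = nvme_mmio_read_4(ctrlr, csts);
 *	if (csts == NVME_GONE)
 *		... treat the device as departed ...
 */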

extern int32_t		nvme_retry_count;
extern bool		nvme_verbose_cmd_dump;

struct nvme_completion_poll_status {
	struct nvme_completion	cpl;
	int			done;
};

extern devclass_t nvme_devclass;

#define NVME_REQUEST_VADDR	1
#define NVME_REQUEST_NULL	2 /* For requests with no payload. */
#define NVME_REQUEST_UIO	3
#define NVME_REQUEST_BIO	4
#define NVME_REQUEST_CCB	5

struct nvme_request {
	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
	union {
		void			*payload;
		struct bio		*bio;
	} u;
	uint32_t			type;
	uint32_t			payload_size;
	bool				timeout;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};

struct nvme_async_event_request {
	struct nvme_controller		*ctrlr;
	struct nvme_request		*req;
	struct nvme_completion		cpl;
	uint32_t			log_page_id;
	uint32_t			log_page_size;
	uint8_t				log_page_buffer[NVME_MAX_AER_LOG_SIZE];
};

struct nvme_tracker {
	TAILQ_ENTRY(nvme_tracker)	tailq;
	struct nvme_request		*req;
	struct nvme_qpair		*qpair;
	struct callout			timer;
	bus_dmamap_t			payload_dma_map;
	uint16_t			cid;

	uint64_t			*prp;
	bus_addr_t			prp_bus_addr;
};

struct nvme_qpair {
	struct nvme_controller	*ctrlr;
	uint32_t		id;
	int			domain;
	int			cpu;

	uint16_t		vector;
	int			rid;
	struct resource		*res;
	void			*tag;

	uint32_t		num_entries;
	uint32_t		num_trackers;
	uint32_t		sq_tdbl_off;
	uint32_t		cq_hdbl_off;

	uint32_t		phase;
	uint32_t		sq_head;
	uint32_t		sq_tail;
	uint32_t		cq_head;

	int64_t			num_cmds;
	int64_t			num_intr_handler_calls;
	int64_t			num_retries;
	int64_t			num_failures;

	struct nvme_command	*cmd;
	struct nvme_completion	*cpl;

	bus_dma_tag_t		dma_tag;
	bus_dma_tag_t		dma_tag_payload;

	bus_dmamap_t		queuemem_map;
	uint64_t		cmd_bus_addr;
	uint64_t		cpl_bus_addr;

	TAILQ_HEAD(, nvme_tracker)	free_tr;
	TAILQ_HEAD(, nvme_tracker)	outstanding_tr;
	STAILQ_HEAD(, nvme_request)	queued_req;

	struct nvme_tracker	**act_tr;

	bool			is_enabled;

	struct mtx		lock __aligned(CACHE_LINE_SIZE);

} __aligned(CACHE_LINE_SIZE);

struct nvme_namespace {
	struct nvme_controller		*ctrlr;
	struct nvme_namespace_data	data;
	uint32_t			id;
	uint32_t			flags;
	struct cdev			*cdev;
	void				*cons_cookie[NVME_MAX_CONSUMERS];
	uint32_t			boundary;
	struct mtx			lock;
};

/*
 * One of these per allocated PCI device.
 */
struct nvme_controller {
	device_t		dev;

	struct mtx		lock;
	int			domain;
	uint32_t		ready_timeout_in_ms;
	uint32_t		quirks;
#define QUIRK_DELAY_B4_CHK_RDY	1	/* Can't touch MMIO on disable */
#define QUIRK_DISABLE_TIMEOUT	2	/* Disable broken completion timeout feature */
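
	/*
	 * Usage sketch (illustrative only): quirks is a bit mask, so a quirk
	 * is tested as, e.g.,
	 *
	 *	if (ctrlr->quirks & QUIRK_DELAY_B4_CHK_RDY)
	 *		... delay before polling CSTS.RDY ...
	 */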

	bus_space_tag_t		bus_tag;
	bus_space_handle_t	bus_handle;
	int			resource_id;
	struct resource		*resource;

	/*
	 * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
	 * separate from the control registers which are in BAR 0/1.  These
	 * members track the mapping of BAR 4/5 for that reason.
	 */
	int			bar4_resource_id;
	struct resource		*bar4_resource;

	int			msi_count;
	uint32_t		enable_aborts;

	uint32_t		num_io_queues;
	uint32_t		max_hw_pend_io;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook	config_hook;
	uint32_t		ns_identified;
	uint32_t		queues_created;

	struct task		reset_task;
	struct task		fail_req_task;
	struct taskqueue	*taskqueue;

	/* For shared legacy interrupt. */
	int			rid;
	struct resource		*res;
	void			*tag;

	/** maximum i/o size in bytes */
	uint32_t		max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t		min_page_size;

	/** interrupt coalescing time period (in microseconds) */
	uint32_t		int_coal_time;

	/** interrupt coalescing threshold */
	uint32_t		int_coal_threshold;

	/** timeout period in seconds */
	uint32_t		timeout_period;

	/** doorbell stride */
	uint32_t		dstrd;

	struct nvme_qpair	adminq;
	struct nvme_qpair	*ioq;

	struct nvme_registers	*regs;

	struct nvme_controller_data	cdata;
	struct nvme_namespace		ns[NVME_MAX_NAMESPACES];

	struct cdev		*cdev;

	/** bit mask of event types currently enabled for async events */
	uint32_t		async_event_config;

	uint32_t		num_aers;
	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];

	void			*cons_cookie[NVME_MAX_CONSUMERS];

	uint32_t		is_resetting;
	uint32_t		is_initialized;
	uint32_t		notification_sent;

	bool			is_failed;
	STAILQ_HEAD(, nvme_request)	fail_req;

	/* Host Memory Buffer */
	int			hmb_nchunks;
	size_t			hmb_chunk;
	bus_dma_tag_t		hmb_tag;
	struct nvme_hmb_chunk {
		bus_dmamap_t	hmbc_map;
		void		*hmbc_vaddr;
		uint64_t	hmbc_paddr;
	} *hmb_chunks;
	bus_dma_tag_t		hmb_desc_tag;
	bus_dmamap_t		hmb_desc_map;
	struct nvme_hmb_desc	*hmb_desc_vaddr;
	uint64_t		hmb_desc_paddr;
};

#define nvme_mmio_offsetof(reg)						\
	offsetof(struct nvme_registers, reg)

#define nvme_mmio_read_4(sc, reg)					\
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg))

#define nvme_mmio_write_4(sc, reg, val)					\
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,		\
	    nvme_mmio_offsetof(reg), val)

#define nvme_mmio_write_8(sc, reg, val)					\
	do {								\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg), (val) & 0xFFFFFFFF);	\
		bus_space_write_4((sc)->bus_tag, (sc)->bus_handle,	\
		    nvme_mmio_offsetof(reg)+4,				\
		    ((val) & 0xFFFFFFFF00000000ULL) >> 32);		\
	} while (0)
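
/*
 * Usage sketch (illustrative only): the register argument is the field name
 * within struct nvme_registers (cc, csts, aqa, asq, acq, ...), resolved at
 * compile time via nvme_mmio_offsetof().  For example, during controller
 * initialization:
 *
 *	uint32_t csts = nvme_mmio_read_4(ctrlr, csts);
 *	nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
 *
 * The exact enable/disable sequence lives in nvme_ctrlr.c.
 */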

#define nvme_printf(ctrlr, fmt, args...)	\
    device_printf(ctrlr->dev, fmt, ##args)

void	nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);

void	nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
					   void *payload,
					   nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
					  uint32_t nsid, void *payload,
					  nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
						uint32_t microseconds,
						uint32_t threshold,
						nvme_cb_fn_t cb_fn,
						void *cb_arg);
void	nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
				      struct nvme_error_information_entry *payload,
				      uint32_t num_entries, /* 0 = max */
				      nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
						   uint32_t nsid,
						   struct nvme_health_information_page *payload,
						   nvme_cb_fn_t cb_fn,
						   void *cb_arg);
void	nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
					 struct nvme_firmware_page *payload,
					 nvme_cb_fn_t cb_fn,
					 void *cb_arg);
void	nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
				    struct nvme_qpair *io_que,
				    nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
				      void *cb_arg);
void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
					      uint32_t state,
					      nvme_cb_fn_t cb_fn, void *cb_arg);
void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);

void	nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);

int	nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
void	nvme_ctrlr_shutdown(struct nvme_controller *ctrlr);
void	nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void	nvme_ctrlr_start_config_hook(void *ctrlr_arg);
void	nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
					struct nvme_request *req);
void	nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
				     struct nvme_request *req);
void	nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
				       struct nvme_request *req);

int	nvme_qpair_construct(struct nvme_qpair *qpair,
			     uint32_t num_entries, uint32_t num_trackers,
			     struct nvme_controller *ctrlr);
void	nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
				  struct nvme_tracker *tr);
bool	nvme_qpair_process_completions(struct nvme_qpair *qpair);
void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
				  struct nvme_request *req);
void	nvme_qpair_reset(struct nvme_qpair *qpair);
void	nvme_qpair_fail(struct nvme_qpair *qpair);
void	nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
					   struct nvme_request *req,
					   uint32_t sct, uint32_t sc);

void	nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_disable(struct nvme_qpair *qpair);
void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);

void	nvme_io_qpair_enable(struct nvme_qpair *qpair);
void	nvme_io_qpair_disable(struct nvme_qpair *qpair);
void	nvme_io_qpair_destroy(struct nvme_qpair *qpair);

int	nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
			  struct nvme_controller *ctrlr);
void	nvme_ns_destruct(struct nvme_namespace *ns);

void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);

void	nvme_dump_command(struct nvme_command *cmd);
void	nvme_dump_completion(struct nvme_completion *cpl);

int	nvme_attach(device_t dev);
int	nvme_shutdown(device_t dev);
int	nvme_detach(device_t dev);

/*
 * Wait for a command to complete using the nvme_completion_poll_cb.  Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs.  The ISR will run the callback, which will set status->done
 * to true, usually within microseconds.  If it does not, then after one
 * second the timeout handler should reset the controller and abort all
 * outstanding requests, including this polled one.  If the command still has
 * not completed after ten seconds, then something is wrong with the driver,
 * and panicking is the only way to recover.
 */
static __inline
void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
	int sanity = hz * 10;

	while (!atomic_load_acq_int(&status->done) && --sanity > 0)
		pause("nvme", 1);
	if (sanity <= 0)
		panic("NVME polled command failed to complete within 10s.");
}
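
/*
 * Usage sketch (illustrative only): the polling helpers form a simple
 * synchronous wrapper around the asynchronous command interface:
 *
 *	struct nvme_completion_poll_status status;
 *
 *	status.done = 0;
 *	nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
 *	    nvme_completion_poll_cb, &status);
 *	nvme_completion_poll(&status);
 *	if (nvme_completion_is_error(&status.cpl))
 *		... the command failed ...
 *
 * nvme_completion_is_error() is declared in nvme.h.
 */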

static __inline void
nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("nvme_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}
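
/*
 * Usage sketch (illustrative only): nvme_single_map is a bus_dmamap_load()
 * callback for single-segment mappings; the caller passes the address of a
 * uint64_t to receive the bus address:
 *
 *	uint64_t queuemem_phys;
 *
 *	bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map, queuemem,
 *	    allocsz, nvme_single_map, &queuemem_phys, 0);
 *
 * The names queuemem and allocsz are hypothetical stand-ins for the caller's
 * buffer and its size.
 */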

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = true;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_VADDR;
		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
		req->type = NVME_REQUEST_NULL;
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_BIO;
		req->u.bio = bio;
	}
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
		req->type = NVME_REQUEST_CCB;
		req->u.payload = ccb;
	}

	return (req);
}

#define nvme_free_request(req)	free(req, M_NVME)
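
/*
 * Usage sketch (illustrative only): callers allocate a request, fill in the
 * command, and hand it to the appropriate submit routine; the request is
 * freed by the completion path, not by the caller:
 *
 *	req = nvme_allocate_request_null(cb_fn, cb_arg);
 *	if (req == NULL)
 *		return (ENOMEM);
 *	req->cmd.opc = NVME_OPC_SET_FEATURES;
 *	... fill in cdw10/cdw11 as needed ...
 *	nvme_ctrlr_submit_admin_request(ctrlr, req);
 */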

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
				    const struct nvme_completion *async_cpl,
				    uint32_t log_page_id, void *log_page_buffer,
				    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
void	nvme_notify_new_controller(struct nvme_controller *ctrlr);
void	nvme_notify_ns(struct nvme_controller *ctrlr, int nsid);

void	nvme_ctrlr_shared_handler(void *arg);
void	nvme_ctrlr_poll(struct nvme_controller *ctrlr);

int	nvme_ctrlr_suspend(struct nvme_controller *ctrlr);
int	nvme_ctrlr_resume(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */