/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2014 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include "nvme_private.h"

typedef enum error_print {
	ERROR_PRINT_NONE,
	ERROR_PRINT_NO_RETRY,
	ERROR_PRINT_ALL
} error_print_t;
#define DO_NOT_RETRY	1
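
/*
 * DO_NOT_RETRY is passed as the dnr argument to
 * nvme_qpair_manual_complete_tracker(); it is folded into the Do Not Retry
 * bit of the synthesized completion status, which in turn makes
 * nvme_completion_is_retry() decline to retry the request.
 */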

static void	_nvme_qpair_submit_request(struct nvme_qpair *qpair,
		    struct nvme_request *req);
static void	nvme_qpair_destroy(struct nvme_qpair *qpair);

struct nvme_opcode_string {
	uint16_t	opc;
	const char	*str;
};

static struct nvme_opcode_string admin_opcode[] = {
	{ NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
	{ NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
	{ NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
	{ NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
	{ NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
	{ NVME_OPC_IDENTIFY, "IDENTIFY" },
	{ NVME_OPC_ABORT, "ABORT" },
	{ NVME_OPC_SET_FEATURES, "SET FEATURES" },
	{ NVME_OPC_GET_FEATURES, "GET FEATURES" },
	{ NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
	{ NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
	{ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
	{ NVME_OPC_DEVICE_SELF_TEST, "DEVICE SELF-TEST" },
	{ NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" },
	{ NVME_OPC_KEEP_ALIVE, "KEEP ALIVE" },
	{ NVME_OPC_DIRECTIVE_SEND, "DIRECTIVE SEND" },
	{ NVME_OPC_DIRECTIVE_RECEIVE, "DIRECTIVE RECEIVE" },
	{ NVME_OPC_VIRTUALIZATION_MANAGEMENT, "VIRTUALIZATION MANAGEMENT" },
	{ NVME_OPC_NVME_MI_SEND, "NVME-MI SEND" },
	{ NVME_OPC_NVME_MI_RECEIVE, "NVME-MI RECEIVE" },
	{ NVME_OPC_DOORBELL_BUFFER_CONFIG, "DOORBELL BUFFER CONFIG" },
	{ NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
	{ NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
	{ NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
	{ NVME_OPC_SANITIZE, "SANITIZE" },
	{ NVME_OPC_GET_LBA_STATUS, "GET LBA STATUS" },
	{ 0xFFFF, "ADMIN COMMAND" }
};

static struct nvme_opcode_string io_opcode[] = {
	{ NVME_OPC_FLUSH, "FLUSH" },
	{ NVME_OPC_WRITE, "WRITE" },
	{ NVME_OPC_READ, "READ" },
	{ NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
	{ NVME_OPC_COMPARE, "COMPARE" },
	{ NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" },
	{ NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
	{ NVME_OPC_VERIFY, "VERIFY" },
	{ NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" },
	{ NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" },
	{ NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" },
	{ NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" },
	{ 0xFFFF, "IO COMMAND" }
};
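
/*
 * The 0xFFFF opc entries above are sentinels: the lookup helpers below stop
 * at them, so each one doubles as the fallback string for any opcode missing
 * from its table.
 */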

static const char *
get_admin_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = admin_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static const char *
get_io_opcode_string(uint16_t opc)
{
	struct nvme_opcode_string *entry;

	entry = io_opcode;

	while (entry->opc != 0xFFFF) {
		if (entry->opc == opc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
	    "cdw10:%08x cdw11:%08x\n",
	    get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
	    le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
}

static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
    struct nvme_command *cmd)
{

	switch (cmd->opc) {
	case NVME_OPC_WRITE:
	case NVME_OPC_READ:
	case NVME_OPC_WRITE_UNCORRECTABLE:
	case NVME_OPC_COMPARE:
	case NVME_OPC_WRITE_ZEROES:
	case NVME_OPC_VERIFY:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
		    "lba:%llu len:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    le32toh(cmd->nsid),
		    ((unsigned long long)le32toh(cmd->cdw11) << 32) +
		    le32toh(cmd->cdw10),
		    (le32toh(cmd->cdw12) & 0xFFFF) + 1);
		break;
	case NVME_OPC_FLUSH:
	case NVME_OPC_DATASET_MANAGEMENT:
	case NVME_OPC_RESERVATION_REGISTER:
	case NVME_OPC_RESERVATION_REPORT:
	case NVME_OPC_RESERVATION_ACQUIRE:
	case NVME_OPC_RESERVATION_RELEASE:
		nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
		    le32toh(cmd->nsid));
		break;
	default:
		nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
		    get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
		    cmd->cid, le32toh(cmd->nsid));
		break;
	}
}

static void
nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
{
	if (qpair->id == 0)
		nvme_admin_qpair_print_command(qpair, cmd);
	else
		nvme_io_qpair_print_command(qpair, cmd);
	if (nvme_verbose_cmd_dump) {
		nvme_printf(qpair->ctrlr,
		    "nsid:%#x rsvd2:%#x rsvd3:%#x mptr:%#jx prp1:%#jx prp2:%#jx\n",
		    cmd->nsid, cmd->rsvd2, cmd->rsvd3, (uintmax_t)cmd->mptr,
		    (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2);
		nvme_printf(qpair->ctrlr,
		    "cdw10:%#x cdw11:%#x cdw12:%#x cdw13:%#x cdw14:%#x cdw15:%#x\n",
		    cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
		    cmd->cdw15);
	}
}

struct nvme_status_string {
	uint16_t	sc;
	const char	*str;
};

static struct nvme_status_string generic_status[] = {
	{ NVME_SC_SUCCESS, "SUCCESS" },
	{ NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
	{ NVME_SC_INVALID_FIELD, "INVALID FIELD" },
	{ NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
	{ NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
	{ NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
	{ NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
	{ NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
	{ NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
	{ NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
	{ NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
	{ NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
	{ NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
	{ NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
	{ NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
	{ NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
	{ NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
	{ NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
	{ NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
	{ NVME_SC_PRP_OFFET_INVALID, "PRP OFFSET INVALID" },
	{ NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
	{ NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
	{ NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
	{ NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
	{ NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
	{ NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
	{ NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
	{ NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
	{ NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL DATA BLOCK GRANULARITY INVALID" },
	{ NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
	{ NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
	{ NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
	{ NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },

	{ NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
	{ NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
	{ NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
	{ NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
	{ NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
	{ 0xFFFF, "GENERIC" }
};

static struct nvme_status_string command_specific_status[] = {
	{ NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
	{ NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
	{ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
	{ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
	{ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
	{ NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
	{ NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
	{ NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
	{ NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
	{ NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
	{ NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
	{ NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
	{ NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
	{ NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
	{ NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
	{ NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
	{ NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
	{ NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
	{ NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
	{ NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
	{ NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
	{ NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
	{ NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
	{ NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" },
	{ NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
	{ NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
	{ NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
	{ NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
	{ NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
	{ NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
	{ NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
	{ NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
	{ NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" },
	{ NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIER INVALID" },
	{ NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },

	{ NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
	{ NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
	{ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
	{ 0xFFFF, "COMMAND SPECIFIC" }
};

static struct nvme_status_string media_error_status[] = {
	{ NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
	{ NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
	{ NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
	{ NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
	{ NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
	{ NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
	{ NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
	{ NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
	{ 0xFFFF, "MEDIA ERROR" }
};

static struct nvme_status_string path_related_status[] = {
	{ NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
	{ NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
	{ NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
	{ NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
	{ NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
	{ NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
	{ NVME_SC_COMMAND_ABOTHED_BY_HOST, "COMMAND ABORTED BY HOST" },
	{ 0xFFFF, "PATH RELATED" },
};

static const char *
get_status_string(uint16_t sct, uint16_t sc)
{
	struct nvme_status_string *entry;

	switch (sct) {
	case NVME_SCT_GENERIC:
		entry = generic_status;
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		entry = command_specific_status;
		break;
	case NVME_SCT_MEDIA_ERROR:
		entry = media_error_status;
		break;
	case NVME_SCT_PATH_RELATED:
		entry = path_related_status;
		break;
	case NVME_SCT_VENDOR_SPECIFIC:
		return ("VENDOR SPECIFIC");
	default:
		return ("RESERVED");
	}

	while (entry->sc != 0xFFFF) {
		if (entry->sc == sc)
			return (entry->str);
		entry++;
	}
	return (entry->str);
}

static void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
    struct nvme_completion *cpl)
{
	uint16_t sct, sc;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);

	nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
	    get_status_string(sct, sc), sct, sc, cpl->sqid, cpl->cid,
	    cpl->cdw0);
}

static bool
nvme_completion_is_retry(const struct nvme_completion *cpl)
{
	uint8_t sct, sc, dnr;

	sct = NVME_STATUS_GET_SCT(cpl->status);
	sc = NVME_STATUS_GET_SC(cpl->status);
	dnr = NVME_STATUS_GET_DNR(cpl->status);	/* Do Not Retry Bit */

	/*
	 * TODO: the spec is not clear on how commands that are aborted due
	 * to TLER will be marked, so for now NAMESPACE_NOT_READY is the
	 * only case where we look at the DNR bit. Requests failed with
	 * ABORTED_BY_REQUEST set the DNR bit correctly since the driver
	 * controls that.
	 */
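	/*
	 * A set DNR bit means the controller expects the same command to
	 * fail again if resubmitted, so honoring it here avoids retrying
	 * requests the device has already judged permanently failed.
	 */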
	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_NAMESPACE_NOT_READY:
			if (dnr)
				return (false);
			else
				return (true);
		case NVME_SC_INVALID_OPCODE:
		case NVME_SC_INVALID_FIELD:
		case NVME_SC_COMMAND_ID_CONFLICT:
		case NVME_SC_DATA_TRANSFER_ERROR:
		case NVME_SC_ABORTED_POWER_LOSS:
		case NVME_SC_INTERNAL_DEVICE_ERROR:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
		case NVME_SC_COMMAND_SEQUENCE_ERROR:
		case NVME_SC_LBA_OUT_OF_RANGE:
		case NVME_SC_CAPACITY_EXCEEDED:
		default:
			return (false);
		}
	case NVME_SCT_COMMAND_SPECIFIC:
	case NVME_SCT_MEDIA_ERROR:
		return (false);
	case NVME_SCT_PATH_RELATED:
		switch (sc) {
		case NVME_SC_INTERNAL_PATH_ERROR:
			if (dnr)
				return (false);
			else
				return (true);
		default:
			return (false);
		}
	case NVME_SCT_VENDOR_SPECIFIC:
	default:
		return (false);
	}
}

static void
nvme_qpair_complete_tracker(struct nvme_tracker *tr,
    struct nvme_completion *cpl, error_print_t print_on_error)
{
	struct nvme_qpair *qpair = tr->qpair;
	struct nvme_request *req;
	bool retry, error, retriable;

	req = tr->req;
	error = nvme_completion_is_error(cpl);
	retriable = nvme_completion_is_retry(cpl);
	retry = error && retriable && req->retries < nvme_retry_count;
	if (retry)
		qpair->num_retries++;
	if (error && req->retries >= nvme_retry_count && retriable)
		qpair->num_failures++;

	if (error && (print_on_error == ERROR_PRINT_ALL ||
	    (!retry && print_on_error == ERROR_PRINT_NO_RETRY))) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, cpl);
	}

	qpair->act_tr[cpl->cid] = NULL;

	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));

	if (!retry) {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_sync(qpair->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, cpl);
	}

	mtx_lock(&qpair->lock);
	callout_stop(&tr->timer);

	if (retry) {
		req->retries++;
		nvme_qpair_submit_tracker(qpair, tr);
	} else {
		if (req->type != NVME_REQUEST_NULL) {
			bus_dmamap_unload(qpair->dma_tag_payload,
			    tr->payload_dma_map);
		}

		nvme_free_request(req);
		tr->req = NULL;

		TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);

		/*
		 * If the controller is in the middle of resetting, don't
		 * try to submit queued requests here - let the reset logic
		 * handle that instead.
		 */
		if (!STAILQ_EMPTY(&qpair->queued_req) &&
		    !qpair->ctrlr->is_resetting) {
			req = STAILQ_FIRST(&qpair->queued_req);
			STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
			_nvme_qpair_submit_request(qpair, req);
		}
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_manual_complete_tracker(
    struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
    error_print_t print_on_error)
{
	struct nvme_qpair *qpair = tr->qpair;
	struct nvme_completion cpl;

	memset(&cpl, 0, sizeof(cpl));

	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
	cpl.status |= (dnr & NVME_STATUS_DNR_MASK) << NVME_STATUS_DNR_SHIFT;
	nvme_qpair_complete_tracker(tr, &cpl, print_on_error);
}

void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
    struct nvme_request *req, uint32_t sct, uint32_t sc)
{
	struct nvme_completion cpl;
	bool error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.status |= (sct & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT;
	cpl.status |= (sc & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

	error = nvme_completion_is_error(&cpl);

	if (error) {
		nvme_qpair_print_command(qpair, &req->cmd);
		nvme_qpair_print_completion(qpair, &cpl);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl);

	nvme_free_request(req);
}

bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;
	struct nvme_completion cpl;
	int done = 0;
	bool in_panic = dumping || SCHEDULER_STOPPED();

	qpair->num_intr_handler_calls++;

	/*
	 * The qpair is not enabled, likely because a controller reset is in
	 * progress. Ignore the interrupt - any I/O that was associated with
	 * this interrupt will get retried when the reset is complete.
	 */
	if (!qpair->is_enabled)
		return (false);

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * A panic can stop the CPU this routine is running on at any point. If
	 * we're called during a panic, complete the cq_head wrap protocol for
	 * the case where we are interrupted just after the increment at 1
	 * below, but before we can reset cq_head to zero at 2. Also cope with
	 * the case where we do the zero at 2, but may or may not have done the
	 * phase adjustment at step 3. The panic machinery flushes all pending
	 * memory writes, so we can make these strong ordering assumptions
	 * that would otherwise be unwise if we were racing in real time.
	 */
	if (__predict_false(in_panic)) {
		if (qpair->cq_head == qpair->num_entries) {
			/*
			 * We were stopped after the increment at 1 but
			 * before the zeroing at 2, so the phase flip at 3
			 * cannot have happened yet either (the
			 * atomic_store_rel at 2 orders it). Zero cq_head
			 * and negate the phase here.
			 */
			qpair->cq_head = 0;
			qpair->phase = !qpair->phase;
		} else if (qpair->cq_head == 0) {
			/*
			 * In this case, we know that the assignment at 2
			 * happened below, but we don't know whether step 3
			 * did. To resync, look at the last completion
			 * entry and set the phase to the opposite of the
			 * phase that entry carries. This gets us back in
			 * sync.
			 */
			cpl = qpair->cpl[qpair->num_entries - 1];
			nvme_completion_swapbytes(&cpl);
			qpair->phase = !NVME_STATUS_GET_P(cpl.status);
		}
	}

	while (1) {
		uint16_t status;

		/*
		 * We need to do this dance to avoid a race between the host and
		 * the device where the device overtakes the host while the host
		 * is reading this record, leaving the status field 'new' and
		 * the sqhd and cid fields potentially stale. If the phase
		 * doesn't match, that means status hasn't yet been updated and
		 * we'll get any pending changes next time. It also means that
		 * the phase must be the same the second time. We have to sync
		 * before reading to ensure any bouncing completes.
		 */
		status = le16toh(qpair->cpl[qpair->cq_head].status);
		if (NVME_STATUS_GET_P(status) != qpair->phase)
			break;

		bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		cpl = qpair->cpl[qpair->cq_head];
		nvme_completion_swapbytes(&cpl);

		KASSERT(
		    NVME_STATUS_GET_P(status) == NVME_STATUS_GET_P(cpl.status),
		    ("Phase unexpectedly inconsistent"));

		tr = qpair->act_tr[cpl.cid];

		if (tr != NULL) {
			nvme_qpair_complete_tracker(tr, &cpl, ERROR_PRINT_ALL);
			qpair->sq_head = cpl.sqhd;
			done++;
		} else if (!in_panic) {
			/*
			 * A missing tracker is normally an error. However, a
			 * panic can stop the CPU this routine is running on
			 * after completing an I/O but before updating
			 * qpair->cq_head at 1 below. Later, we re-enter this
			 * routine to poll I/O associated with the kernel
			 * dump, and find the tracker has already been set to
			 * NULL before the completion routine was called. If
			 * it hasn't completed (or it triggers a panic), then
			 * '1' below won't have updated cq_head. Rather than
			 * panic again, ignore this condition because it's
			 * not unexpected.
			 */
			nvme_printf(qpair->ctrlr,
			    "cpl does not map to outstanding cmd\n");
			/* nvme_dump_completion expects device endianness */
			nvme_dump_completion(&qpair->cpl[qpair->cq_head]);
			KASSERT(0, ("received completion for unknown cmd"));
		}

		/*
		 * There are a number of races with the following (see above)
		 * when the system panics. We compensate for each one of them
		 * by using the atomic store to force strong ordering (at
		 * least when viewed in the aftermath of a panic).
		 */
		if (++qpair->cq_head == qpair->num_entries) {		/* 1 */
			atomic_store_rel_int(&qpair->cq_head, 0);	/* 2 */
			qpair->phase = !qpair->phase;			/* 3 */
		}

		bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
		    qpair->cq_hdbl_off, qpair->cq_head);
	}
	return (done != 0);
}

static void
nvme_qpair_msi_handler(void *arg)
{
	struct nvme_qpair *qpair = arg;

	nvme_qpair_process_completions(qpair);
}

int
nvme_qpair_construct(struct nvme_qpair *qpair,
    uint32_t num_entries, uint32_t num_trackers,
    struct nvme_controller *ctrlr)
{
	struct nvme_tracker *tr;
	size_t cmdsz, cplsz, prpsz, allocsz, prpmemsz;
	uint64_t queuemem_phys, prpmem_phys, list_phys;
	uint8_t *queuemem, *prpmem, *prp_list;
	int i, err;

	qpair->vector = ctrlr->msi_count > 1 ? qpair->id : 0;
	qpair->num_entries = num_entries;
	qpair->num_trackers = num_trackers;
	qpair->ctrlr = ctrlr;

	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);

	/* Note: NVMe PRP format is restricted to 4-byte alignment. */
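	/*
	 * The payload tag below allows btoc(max_xfer_size) + 1 segments of
	 * at most PAGE_SIZE each, none crossing a page boundary, so that a
	 * maximum-sized transfer starting mid-page still fits and each
	 * resulting segment maps to exactly one PRP entry.
	 */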
	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    4, PAGE_SIZE, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, ctrlr->max_xfer_size,
	    btoc(ctrlr->max_xfer_size) + 1, PAGE_SIZE, 0,
	    NULL, NULL, &qpair->dma_tag_payload);
	if (err != 0) {
		nvme_printf(ctrlr, "payload tag create failed %d\n", err);
		goto out;
	}

	/*
	 * Each component must be page aligned, and individual PRP lists
	 * cannot cross a page boundary.
	 */
	cmdsz = qpair->num_entries * sizeof(struct nvme_command);
	cmdsz = roundup2(cmdsz, PAGE_SIZE);
	cplsz = qpair->num_entries * sizeof(struct nvme_completion);
	cplsz = roundup2(cplsz, PAGE_SIZE);
	/*
	 * For commands requiring more than 2 PRP entries, one PRP will be
	 * embedded in the command (prp1), and the rest of the PRP entries
	 * will be in a list pointed to by the command (prp2).
	 */
	prpsz = sizeof(uint64_t) * btoc(ctrlr->max_xfer_size);
	prpmemsz = qpair->num_trackers * prpsz;
	allocsz = cmdsz + cplsz + prpmemsz;
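
	/*
	 * Illustrative sizing, assuming 4 KiB pages, 64-byte commands and
	 * 16-byte completions: a 256-entry qpair needs 16 KiB of SQ and
	 * 4 KiB of CQ memory, and with a 1 MiB max_xfer_size each tracker
	 * reserves room for 256 PRP entries (2 KiB) before rounding.
	 */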

	err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &qpair->dma_tag);
	if (err != 0) {
		nvme_printf(ctrlr, "tag create failed %d\n", err);
		goto out;
	}
	bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);

	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
		goto out;
	}

	if (bus_dmamap_load(qpair->dma_tag, qpair->queuemem_map,
	    queuemem, allocsz, nvme_single_map, &queuemem_phys, 0) != 0) {
		nvme_printf(ctrlr, "failed to load qpair memory\n");
		/* Free the buffer just allocated; qpair->cmd is not set yet. */
		bus_dmamem_free(qpair->dma_tag, queuemem,
		    qpair->queuemem_map);
		goto out;
	}

	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 0;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->cmd = (struct nvme_command *)queuemem;
	qpair->cpl = (struct nvme_completion *)(queuemem + cmdsz);
	prpmem = (uint8_t *)(queuemem + cmdsz + cplsz);
	qpair->cmd_bus_addr = queuemem_phys;
	qpair->cpl_bus_addr = queuemem_phys + cmdsz;
	prpmem_phys = queuemem_phys + cmdsz + cplsz;

	/*
	 * Calculate the doorbell register offsets from the stride. Many
	 * emulators set the stride to correspond to a cache line, while
	 * some hardware has set it to various small values.
	 */
	qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1));
	qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[0]) +
	    (qpair->id << (ctrlr->dstrd + 1)) + (1 << ctrlr->dstrd);
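
	/*
	 * Worked example, assuming ctrlr->dstrd (set elsewhere) already
	 * encodes the byte stride as a shift (1 << dstrd bytes per
	 * doorbell): with the minimum 4-byte stride, queue 1's SQ tail
	 * doorbell lands at doorbell[0] + 8 and its CQ head doorbell at
	 * doorbell[0] + 12.
	 */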

	TAILQ_INIT(&qpair->free_tr);
	TAILQ_INIT(&qpair->outstanding_tr);
	STAILQ_INIT(&qpair->queued_req);

	list_phys = prpmem_phys;
	prp_list = prpmem;
	for (i = 0; i < qpair->num_trackers; i++) {
		if (list_phys + prpsz > prpmem_phys + prpmemsz) {
			qpair->num_trackers = i;
			break;
		}

		/*
		 * Make sure that the PRP list for this tracker doesn't
		 * overflow to another page.
		 */
		if (trunc_page(list_phys) !=
		    trunc_page(list_phys + prpsz - 1)) {
			list_phys = roundup2(list_phys, PAGE_SIZE);
			prp_list =
			    (uint8_t *)roundup2((uintptr_t)prp_list, PAGE_SIZE);
		}

		tr = malloc_domainset(sizeof(*tr), M_NVME,
		    DOMAINSET_PREF(qpair->domain), M_ZERO | M_WAITOK);
		bus_dmamap_create(qpair->dma_tag_payload, 0,
		    &tr->payload_dma_map);
		callout_init(&tr->timer, 1);
		tr->cid = i;
		tr->qpair = qpair;
		tr->prp = (uint64_t *)prp_list;
		tr->prp_bus_addr = list_phys;
		TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
		list_phys += prpsz;
		prp_list += prpsz;
	}

	if (qpair->num_trackers == 0) {
		nvme_printf(ctrlr, "failed to allocate enough trackers\n");
		goto out;
	}

	qpair->act_tr = malloc_domainset(sizeof(struct nvme_tracker *) *
	    qpair->num_entries, M_NVME, DOMAINSET_PREF(qpair->domain),
	    M_ZERO | M_WAITOK);

	if (ctrlr->msi_count > 1) {
		/*
		 * MSI-X vector resource IDs start at 1, so we add one to
		 * the queue's vector to get the corresponding rid to use.
		 */
		qpair->rid = qpair->vector + 1;

		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
		    &qpair->rid, RF_ACTIVE);
		if (qpair->res == NULL) {
			nvme_printf(ctrlr, "unable to allocate MSI\n");
			goto out;
		}
		if (bus_setup_intr(ctrlr->dev, qpair->res,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
		    nvme_qpair_msi_handler, qpair, &qpair->tag) != 0) {
			nvme_printf(ctrlr, "unable to setup MSI\n");
			goto out;
		}
		if (qpair->id == 0) {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "admin");
		} else {
			bus_describe_intr(ctrlr->dev, qpair->res, qpair->tag,
			    "io%d", qpair->id - 1);
		}
	}

	return (0);

out:
	nvme_qpair_destroy(qpair);
	return (ENOMEM);
}

static void
nvme_qpair_destroy(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	if (qpair->tag) {
		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
		qpair->tag = NULL;
	}

	if (qpair->act_tr) {
		free(qpair->act_tr, M_NVME);
		qpair->act_tr = NULL;
	}

	while (!TAILQ_EMPTY(&qpair->free_tr)) {
		tr = TAILQ_FIRST(&qpair->free_tr);
		TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
		bus_dmamap_destroy(qpair->dma_tag_payload,
		    tr->payload_dma_map);
		free(tr, M_NVME);
	}

	if (qpair->cmd != NULL) {
		bus_dmamap_unload(qpair->dma_tag, qpair->queuemem_map);
		bus_dmamem_free(qpair->dma_tag, qpair->cmd,
		    qpair->queuemem_map);
		qpair->cmd = NULL;
	}

	if (qpair->dma_tag) {
		bus_dma_tag_destroy(qpair->dma_tag);
		qpair->dma_tag = NULL;
	}

	if (qpair->dma_tag_payload) {
		bus_dma_tag_destroy(qpair->dma_tag_payload);
		qpair->dma_tag_payload = NULL;
	}

	if (mtx_initialized(&qpair->lock))
		mtx_destroy(&qpair->lock);

	if (qpair->res) {
		bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(qpair->res), qpair->res);
		qpair->res = NULL;
	}
}

static void
nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	tr = TAILQ_FIRST(&qpair->outstanding_tr);
	while (tr != NULL) {
		if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_qpair_manual_complete_tracker(tr,
			    NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
			    ERROR_PRINT_NONE);
			tr = TAILQ_FIRST(&qpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tailq);
		}
	}
}

void
nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_admin_qpair_abort_aers(qpair);
	nvme_qpair_destroy(qpair);
}

void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{

	nvme_qpair_destroy(qpair);
}

static void
nvme_abort_complete(void *arg, const struct nvme_completion *status)
{
	struct nvme_tracker *tr = arg;

	/*
	 * If cdw0 == 1, the controller was not able to abort the command
	 * we requested. We still need to check the active tracker array,
	 * to cover the race where the I/O timed out at the same time the
	 * controller was completing it.
	 */
	if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
		/*
		 * An I/O has timed out, and the controller was unable to
		 * abort it for some reason. Construct a fake completion
		 * status, and then complete the I/O's tracker manually.
		 */
		nvme_printf(tr->qpair->ctrlr,
		    "abort command failed, aborting command manually\n");
		nvme_qpair_manual_complete_tracker(tr,
		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0,
		    ERROR_PRINT_ALL);
	}
}

static void
nvme_timeout(void *arg)
{
	struct nvme_tracker *tr = arg;
	struct nvme_qpair *qpair = tr->qpair;
	struct nvme_controller *ctrlr = qpair->ctrlr;
	uint32_t csts;
	uint8_t cfs;

	/*
	 * Read csts to get the value of cfs - the controller fatal status.
	 * If there is no fatal status, try to call the completion routine;
	 * if it completes any transactions, report a missed interrupt and
	 * return (this may need to be rate limited). Otherwise, if aborts
	 * are enabled and the controller is not reporting fatal status,
	 * abort the command. Otherwise, just reset the controller and hope
	 * for the best.
	 */
	csts = nvme_mmio_read_4(ctrlr, csts);
	cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
	if (cfs == 0 && nvme_qpair_process_completions(qpair)) {
		nvme_printf(ctrlr, "Missing interrupt\n");
		return;
	}
	if (ctrlr->enable_aborts && cfs == 0) {
		nvme_printf(ctrlr, "Aborting command due to a timeout.\n");
		nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
		    nvme_abort_complete, tr);
	} else {
		nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
		    (csts == NVME_GONE) ? " and possible hot unplug" :
		    (cfs ? " and fatal error status" : ""));
		nvme_ctrlr_reset(ctrlr);
	}
}

void
nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request *req;
	struct nvme_controller *ctrlr;
	int timeout;

	mtx_assert(&qpair->lock, MA_OWNED);

	req = tr->req;
	req->cmd.cid = tr->cid;
	qpair->act_tr[tr->cid] = tr;
	ctrlr = qpair->ctrlr;

	if (req->timeout) {
		if (req->cb_fn == nvme_completion_poll_cb)
			timeout = hz;
		else
			timeout = ctrlr->timeout_period * hz;
		callout_reset_on(&tr->timer, timeout, nvme_timeout, tr,
		    qpair->cpu);
	}
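
	/*
	 * Illustrative timing: polled requests (nvme_completion_poll_cb)
	 * are given one second (hz ticks), while everything else gets
	 * ctrlr->timeout_period seconds; the period itself is configured
	 * elsewhere in the driver and is tunable.
	 */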

	/* Copy the command from the tracker to the submission queue. */
	memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));

	if (++qpair->sq_tail == qpair->num_entries)
		qpair->sq_tail = 0;

	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
	    qpair->sq_tdbl_off, qpair->sq_tail);
	qpair->num_cmds++;
}

static void
nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct nvme_tracker *tr = arg;
	uint32_t cur_nseg;

	/*
	 * If the mapping operation failed, return immediately. The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		nvme_printf(tr->qpair->ctrlr,
		    "nvme_payload_map err %d\n", error);
		return;
	}

	/*
	 * Note that we specified PAGE_SIZE for alignment and max
	 * segment size when creating the bus dma tags. So here
	 * we can safely just transfer each segment to its
	 * associated PRP entry.
	 */
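	/*
	 * Illustrative case: a page-aligned 16 KiB buffer with 4 KiB pages
	 * maps to nseg == 4, so prp1 takes seg[0] and prp2 points at the
	 * tracker's PRP list holding segments 1 through 3.
	 */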
	tr->req->cmd.prp1 = htole64(seg[0].ds_addr);

	if (nseg == 2) {
		tr->req->cmd.prp2 = htole64(seg[1].ds_addr);
	} else if (nseg > 2) {
		cur_nseg = 1;
		tr->req->cmd.prp2 = htole64((uint64_t)tr->prp_bus_addr);
		while (cur_nseg < nseg) {
			tr->prp[cur_nseg - 1] =
			    htole64((uint64_t)seg[cur_nseg].ds_addr);
			cur_nseg++;
		}
	} else {
		/*
		 * prp2 should not be used by the controller
		 * since there is only one segment, but set
		 * it to 0 just to be safe.
		 */
		tr->req->cmd.prp2 = 0;
	}

	bus_dmamap_sync(tr->qpair->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	nvme_qpair_submit_tracker(tr->qpair, tr);
}

static void
_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker *tr;
	int err = 0;

	mtx_assert(&qpair->lock, MA_OWNED);

	tr = TAILQ_FIRST(&qpair->free_tr);
	req->qpair = qpair;

	if (tr == NULL || !qpair->is_enabled) {
		/*
		 * No tracker is available, or the qpair is disabled due to
		 * an in-progress controller-level reset or controller
		 * failure.
		 */

		if (qpair->ctrlr->is_failed) {
			/*
			 * The controller has failed. Post the request to a
			 * task where it will be aborted, so that we do not
			 * invoke the request's callback in the context
			 * of the submission.
			 */
			nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
		} else {
			/*
			 * Put the request on the qpair's request queue to be
			 * processed when a tracker frees up via a command
			 * completion or when the controller reset is
			 * completed.
			 */
			STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
		}
		return;
	}

	TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
	tr->req = req;

	switch (req->type) {
	case NVME_REQUEST_VADDR:
		KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
		    ("payload_size (%d) exceeds max_xfer_size (%d)\n",
		    req->payload_size, qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload, req->payload_size,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_NULL:
		nvme_qpair_submit_tracker(tr->qpair, tr);
		break;
	case NVME_REQUEST_BIO:
		KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
		    ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
		    (intmax_t)req->u.bio->bio_bcount,
		    qpair->ctrlr->max_xfer_size));
		err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_bio returned 0x%x!\n", err);
		break;
	case NVME_REQUEST_CCB:
		err = bus_dmamap_load_ccb(tr->qpair->dma_tag_payload,
		    tr->payload_dma_map, req->u.payload,
		    nvme_payload_map, tr, 0);
		if (err != 0)
			nvme_printf(qpair->ctrlr,
			    "bus_dmamap_load_ccb returned 0x%x!\n", err);
		break;
	default:
		panic("unknown nvme request type 0x%x\n", req->type);
		break;
	}

	if (err != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with DATA_TRANSFER_ERROR status.
		 *
		 * nvme_qpair_manual_complete_tracker must not be called
		 * with the qpair lock held.
		 */
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_DATA_TRANSFER_ERROR, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}
}

void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{

	mtx_lock(&qpair->lock);
	_nvme_qpair_submit_request(qpair, req);
	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{

	qpair->is_enabled = true;
}

void
nvme_qpair_reset(struct nvme_qpair *qpair)
{

	qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;

	/*
	 * The first time through the completion queue, HW will set the
	 * phase bit on completions to 1. So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue rolls over.
	 */
	qpair->phase = 1;
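	/*
	 * For example, with a 4-entry CQ the host consumes entries 0..3
	 * expecting P == 1, wraps, then expects P == 0 on the next pass,
	 * and so on.
	 */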

	memset(qpair->cmd, 0,
	    qpair->num_entries * sizeof(struct nvme_command));
	memset(qpair->cpl, 0,
	    qpair->num_entries * sizeof(struct nvme_completion));
}

void
nvme_admin_qpair_enable(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;
	struct nvme_tracker *tr_temp;

	/*
	 * Manually abort each outstanding admin command. Do not retry
	 * admin commands found here, since they will be left over from
	 * a controller reset and it's likely the context in which the
	 * command was issued no longer applies.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr,
		    "aborting outstanding admin command\n");
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
	}

	nvme_qpair_enable(qpair);
}

void
nvme_io_qpair_enable(struct nvme_qpair *qpair)
{
	STAILQ_HEAD(, nvme_request) temp;
	struct nvme_tracker *tr;
	struct nvme_tracker *tr_temp;
	struct nvme_request *req;

	/*
	 * Manually abort each outstanding I/O. This normally results in a
	 * retry, unless the retry count on the associated request has
	 * reached its limit.
	 */
	TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
		nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_NO_RETRY);
	}

	mtx_lock(&qpair->lock);

	nvme_qpair_enable(qpair);

	STAILQ_INIT(&temp);
	STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);

	while (!STAILQ_EMPTY(&temp)) {
		req = STAILQ_FIRST(&temp);
		STAILQ_REMOVE_HEAD(&temp, stailq);
		nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
		nvme_qpair_print_command(qpair, &req->cmd);
		_nvme_qpair_submit_request(qpair, req);
	}

	mtx_unlock(&qpair->lock);
}

static void
nvme_qpair_disable(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;

	qpair->is_enabled = false;
	mtx_lock(&qpair->lock);
	TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
		callout_stop(&tr->timer);
	mtx_unlock(&qpair->lock);
}

void
nvme_admin_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
	nvme_admin_qpair_abort_aers(qpair);
}

void
nvme_io_qpair_disable(struct nvme_qpair *qpair)
{

	nvme_qpair_disable(qpair);
}

void
nvme_qpair_fail(struct nvme_qpair *qpair)
{
	struct nvme_tracker *tr;
	struct nvme_request *req;

	if (!mtx_initialized(&qpair->lock))
		return;

	mtx_lock(&qpair->lock);

	while (!STAILQ_EMPTY(&qpair->queued_req)) {
		req = STAILQ_FIRST(&qpair->queued_req);
		STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
		nvme_printf(qpair->ctrlr, "failing queued i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST);
		mtx_lock(&qpair->lock);
	}

	/* Manually abort each outstanding I/O. */
	while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
		tr = TAILQ_FIRST(&qpair->outstanding_tr);
		/*
		 * Do not remove the tracker. The abort_tracker path will
		 * do that for us.
		 */
		nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
		mtx_unlock(&qpair->lock);
		nvme_qpair_manual_complete_tracker(tr, NVME_SCT_GENERIC,
		    NVME_SC_ABORTED_BY_REQUEST, DO_NOT_RETRY, ERROR_PRINT_ALL);
		mtx_lock(&qpair->lock);
	}

	mtx_unlock(&qpair->lock);
}