/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2012-2016 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_nvme.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/sysctl.h>

#include "nvme_private.h"

#ifndef NVME_USE_NVD
#define NVME_USE_NVD 1
#endif

int nvme_use_nvd = NVME_USE_NVD;
bool nvme_verbose_cmd_dump = false;

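/*
 * The tunables below live under the hw.nvme sysctl tree created here.
 * hw.nvme.use_nvd is a boot-time tunable (CTLFLAG_RDTUN), normally set
 * from loader.conf, e.g. "hw.nvme.use_nvd=0" to expose namespaces via
 * nda(4) rather than nvd(4).  hw.nvme.verbose_cmd_dump is CTLFLAG_RWTUN
 * and can also be toggled at runtime with sysctl(8).
 */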
SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "NVMe sysctl tunables");
SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
    &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");
SYSCTL_BOOL(_hw_nvme, OID_AUTO, verbose_cmd_dump, CTLFLAG_RWTUN,
    &nvme_verbose_cmd_dump, 0,
    "Enable verbose command printing when a command fails");

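/*
 * Dump every submission and completion queue entry of a queue pair to the
 * console.  This is a debugging aid, reachable through the per-queue
 * dump_debug sysctl handler below.
 */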
static void
nvme_dump_queue(struct nvme_qpair *qpair)
{
	struct nvme_completion *cpl;
	struct nvme_command *cmd;
	int i;

	printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);

	printf("Completion queue:\n");
	for (i = 0; i < qpair->num_entries; i++) {
		cpl = &qpair->cpl[i];
		printf("%05d: ", i);
		nvme_dump_completion(cpl);
	}

	printf("Submission queue:\n");
	for (i = 0; i < qpair->num_entries; i++) {
		cmd = &qpair->cmd[i];
		printf("%05d: ", i);
		nvme_dump_command(cmd);
	}
}

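/*
 * dump_debug sysctl handler: writing any non-zero value dumps the
 * associated queue pair; reads simply return 0.
 */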
static int
nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
{
	struct nvme_qpair *qpair = arg1;
	uint32_t val = 0;

	int error = sysctl_handle_int(oidp, &val, 0, req);

	if (error)
		return (error);

	if (val != 0)
		nvme_dump_queue(qpair);

	return (0);
}

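/*
 * Interrupt coalescing sysctl handlers.  Whenever either the time or the
 * threshold value changes, the new pair is pushed to the controller via
 * nvme_ctrlr_cmd_set_interrupt_coalescing(); no completion callback is
 * requested, so the result is not reported back through the sysctl.
 */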
static int
nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_time;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_time)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}

static int
nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_threshold;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_threshold)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}

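/*
 * timeout_period sysctl handler.  New values outside the range
 * [NVME_MIN_TIMEOUT_PERIOD, NVME_MAX_TIMEOUT_PERIOD] are rejected with
 * EINVAL; otherwise the value simply replaces ctrlr->timeout_period.
 */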
static int
nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t newval = ctrlr->timeout_period;
	int error = sysctl_handle_int(oidp, &newval, 0, req);

	if (error || (req->newptr == NULL))
		return (error);

	if (newval > NVME_MAX_TIMEOUT_PERIOD ||
	    newval < NVME_MIN_TIMEOUT_PERIOD) {
		return (EINVAL);
	} else {
		ctrlr->timeout_period = newval;
	}

	return (0);
}

static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{

	/*
	 * Reset the values. Due to sanity checks in
	 * nvme_qpair_process_completions, we reset the number of interrupt
	 * calls to 1.
	 */
	qpair->num_cmds = 0;
	qpair->num_intr_handler_calls = 1;
	qpair->num_retries = 0;
	qpair->num_failures = 0;
	qpair->num_ignored = 0;
}

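/*
 * The handlers below report controller-wide statistics by summing the
 * per-queue counters of the admin queue and every I/O queue.  The
 * reset_stats handler further down clears those same per-queue counters.
 */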
static int
nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_cmds = 0;
	int i;

	num_cmds = ctrlr->adminq.num_cmds;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_cmds += ctrlr->ioq[i].num_cmds;

	return (sysctl_handle_64(oidp, &num_cmds, 0, req));
}

static int
nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_intr_handler_calls = 0;
	int i;

	num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;

	return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
}

static int
nvme_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_retries = 0;
	int i;

	num_retries = ctrlr->adminq.num_retries;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_retries += ctrlr->ioq[i].num_retries;

	return (sysctl_handle_64(oidp, &num_retries, 0, req));
}

static int
nvme_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_failures = 0;
	int i;

	num_failures = ctrlr->adminq.num_failures;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_failures += ctrlr->ioq[i].num_failures;

	return (sysctl_handle_64(oidp, &num_failures, 0, req));
}

static int
nvme_sysctl_num_ignored(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	int64_t num_ignored = 0;
	int i;

	num_ignored = ctrlr->adminq.num_ignored;

	for (i = 0; i < ctrlr->num_io_queues; i++)
		num_ignored += ctrlr->ioq[i].num_ignored;

	return (sysctl_handle_64(oidp, &num_ignored, 0, req));
}

static int
nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t i, val = 0;

	int error = sysctl_handle_int(oidp, &val, 0, req);

	if (error)
		return (error);

	if (val != 0) {
		nvme_qpair_reset_stats(&ctrlr->adminq);

		for (i = 0; i < ctrlr->num_io_queues; i++)
			nvme_qpair_reset_stats(&ctrlr->ioq[i]);
	}

	return (0);
}

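/*
 * Attach the per-queue-pair sysctls (queue geometry, driver-observed
 * head/tail pointers and command statistics) beneath the given queue node.
 */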
static void
nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
    struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
{
	struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
	    CTLFLAG_RD, &qpair->num_entries, 0,
	    "Number of entries in hardware queue");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
	    CTLFLAG_RD, &qpair->num_trackers, 0,
	    "Number of trackers pre-allocated for this queue pair");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
	    CTLFLAG_RD, &qpair->sq_head, 0,
	    "Current head of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
	    CTLFLAG_RD, &qpair->sq_tail, 0,
	    "Current tail of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
	    CTLFLAG_RD, &qpair->cq_head, 0,
	    "Current head of completion queue (as observed by driver)");

	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
	    CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
	    CTLFLAG_RD, &qpair->num_intr_handler_calls,
	    "Number of times interrupt handler was invoked (will typically be "
	    "less than number of actual interrupts generated due to "
	    "coalescing)");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries",
	    CTLFLAG_RD, &qpair->num_retries, "Number of commands retried");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures",
	    CTLFLAG_RD, &qpair->num_failures,
	    "Number of commands ending in failure after all retries");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_ignored",
	    CTLFLAG_RD, &qpair->num_ignored,
302 "Number of interrupts posted, but were administratively ignored");

	SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
	    "dump_debug", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    qpair, 0, nvme_sysctl_dump_debug, "IU", "Dump debug data");
}

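/*
 * Build the controller's sysctl tree under its device node (for example
 * dev.nvme.0.int_coal_time or dev.nvme.0.ioq1.num_cmds): global settings
 * and aggregate statistics at the top level, plus an adminq node and one
 * ioqN node per I/O queue pair.
 */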
void
nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
{
	struct sysctl_ctx_list *ctrlr_ctx;
	struct sysctl_oid *ctrlr_tree, *que_tree;
	struct sysctl_oid_list *ctrlr_list;
#define QUEUE_NAME_LENGTH 16
	char queue_name[QUEUE_NAME_LENGTH];
	int i;

	ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
	ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
	ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues",
	    CTLFLAG_RD, &ctrlr->num_io_queues, 0,
	    "Number of I/O queue pairs");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_int_coal_time, "IU",
	    "Interrupt coalescing timeout (in microseconds)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "int_coal_threshold",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ctrlr, 0,
	    nvme_sysctl_int_coal_threshold, "IU",
	    "Interrupt coalescing threshold");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_timeout_period, "IU",
	    "Timeout period (in seconds)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_cmds", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_cmds, "IU",
	    "Number of commands submitted");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_intr_handler_calls",
	    CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
	    nvme_sysctl_num_intr_handler_calls, "IU",
	    "Number of times interrupt handler was invoked (will "
	    "typically be less than number of actual interrupts "
	    "generated due to coalescing)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_retries", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_retries, "IU",
	    "Number of commands retried");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_failures", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_failures, "IU",
	    "Number of commands ending in failure after all retries");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_ignored", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_ignored, "IU",
	    "Number of interrupts ignored administratively");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "reset_stats", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ctrlr,
	    0, nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");

	SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap_lo",
	    CTLFLAG_RD, &ctrlr->cap_lo, 0,
	    "Low 32 bits of the controller capabilities (CAP) register");

	SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap_hi",
	    CTLFLAG_RD, &ctrlr->cap_hi, 0,
	    "High 32 bits of the controller capabilities (CAP) register");

	que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");

	nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
		que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue");
		nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
		    que_tree);
	}
}