1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2012-2016 Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include "opt_nvme.h"
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/sysctl.h>
37
38 #include "nvme_private.h"
39
/*
 * Default for hw.nvme.use_nvd; can be overridden at build time via the
 * NVME_USE_NVD kernel option (see opt_nvme.h).
 */
#ifndef NVME_USE_NVD
#define NVME_USE_NVD 1
#endif

/* Selects which disk front-end attaches to namespaces: 1 = nvd(4), 0 = nda(4). */
int nvme_use_nvd = NVME_USE_NVD;
/* When true, failed commands are dumped verbosely to the console. */
bool nvme_verbose_cmd_dump = false;

/* Root of the hw.nvme sysctl tree; per-controller nodes hang off the device. */
SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "NVMe sysctl tunables");
SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
    &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");
SYSCTL_BOOL(_hw_nvme, OID_AUTO, verbose_cmd_dump, CTLFLAG_RWTUN,
    &nvme_verbose_cmd_dump, 0,
    "enable verbose command printing when a command fails");
54
55 static void
56 nvme_dump_queue(struct nvme_qpair *qpair)
57 {
58 struct nvme_completion *cpl;
59 struct nvme_command *cmd;
60 int i;
61
62 printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);
63
64 printf("Completion queue:\n");
65 for (i = 0; i < qpair->num_entries; i++) {
66 cpl = &qpair->cpl[i];
67 printf("%05d: ", i);
68 nvme_dump_completion(cpl);
69 }
70
71 printf("Submission queue:\n");
72 for (i = 0; i < qpair->num_entries; i++) {
73 cmd = &qpair->cmd[i];
74 printf("%05d: ", i);
75 nvme_dump_command(cmd);
76 }
77 }
78
79 static int
80 nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
81 {
82 struct nvme_qpair *qpair = arg1;
83 uint32_t val = 0;
84
85 int error = sysctl_handle_int(oidp, &val, 0, req);
86
87 if (error)
88 return (error);
89
90 if (val != 0)
91 nvme_dump_queue(qpair);
92
93 return (0);
94 }
95
96 static int
97 nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
98 {
99 struct nvme_controller *ctrlr = arg1;
100 uint32_t oldval = ctrlr->int_coal_time;
101 int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
102 req);
103
104 if (error)
105 return (error);
106
107 if (oldval != ctrlr->int_coal_time)
108 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
109 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
110 NULL);
111
112 return (0);
113 }
114
115 static int
116 nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
117 {
118 struct nvme_controller *ctrlr = arg1;
119 uint32_t oldval = ctrlr->int_coal_threshold;
120 int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
121 req);
122
123 if (error)
124 return (error);
125
126 if (oldval != ctrlr->int_coal_threshold)
127 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
128 ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
129 NULL);
130
131 return (0);
132 }
133
134 static int
135 nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
136 {
137 struct nvme_controller *ctrlr = arg1;
138 uint32_t newval = ctrlr->timeout_period;
139 int error = sysctl_handle_int(oidp, &newval, 0, req);
140
141 if (error || (req->newptr == NULL))
142 return (error);
143
144 if (newval > NVME_MAX_TIMEOUT_PERIOD ||
145 newval < NVME_MIN_TIMEOUT_PERIOD) {
146 return (EINVAL);
147 } else {
148 ctrlr->timeout_period = newval;
149 }
150
151 return (0);
152 }
153
154 static void
155 nvme_qpair_reset_stats(struct nvme_qpair *qpair)
156 {
157
158 qpair->num_cmds = 0;
159 qpair->num_intr_handler_calls = 0;
160 qpair->num_retries = 0;
161 qpair->num_failures = 0;
162 }
163
164 static int
165 nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
166 {
167 struct nvme_controller *ctrlr = arg1;
168 int64_t num_cmds = 0;
169 int i;
170
171 num_cmds = ctrlr->adminq.num_cmds;
172
173 for (i = 0; i < ctrlr->num_io_queues; i++)
174 num_cmds += ctrlr->ioq[i].num_cmds;
175
176 return (sysctl_handle_64(oidp, &num_cmds, 0, req));
177 }
178
179 static int
180 nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
181 {
182 struct nvme_controller *ctrlr = arg1;
183 int64_t num_intr_handler_calls = 0;
184 int i;
185
186 num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
187
188 for (i = 0; i < ctrlr->num_io_queues; i++)
189 num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
190
191 return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
192 }
193
194 static int
195 nvme_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
196 {
197 struct nvme_controller *ctrlr = arg1;
198 int64_t num_retries = 0;
199 int i;
200
201 num_retries = ctrlr->adminq.num_retries;
202
203 for (i = 0; i < ctrlr->num_io_queues; i++)
204 num_retries += ctrlr->ioq[i].num_retries;
205
206 return (sysctl_handle_64(oidp, &num_retries, 0, req));
207 }
208
209 static int
210 nvme_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
211 {
212 struct nvme_controller *ctrlr = arg1;
213 int64_t num_failures = 0;
214 int i;
215
216 num_failures = ctrlr->adminq.num_failures;
217
218 for (i = 0; i < ctrlr->num_io_queues; i++)
219 num_failures += ctrlr->ioq[i].num_failures;
220
221 return (sysctl_handle_64(oidp, &num_failures, 0, req));
222 }
223
224 static int
225 nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
226 {
227 struct nvme_controller *ctrlr = arg1;
228 uint32_t i, val = 0;
229
230 int error = sysctl_handle_int(oidp, &val, 0, req);
231
232 if (error)
233 return (error);
234
235 if (val != 0) {
236 nvme_qpair_reset_stats(&ctrlr->adminq);
237
238 for (i = 0; i < ctrlr->num_io_queues; i++)
239 nvme_qpair_reset_stats(&ctrlr->ioq[i]);
240 }
241
242 return (0);
243 }
244
/*
 * Register the per-queue-pair sysctl nodes under the given queue tree:
 * ring geometry (num_entries, num_trackers), the driver's view of the
 * hardware ring pointers (sq_head, sq_tail, cq_head), the statistics
 * counters, and a dump_debug trigger that prints the rings to the
 * console (see nvme_sysctl_dump_debug()).
 */
static void
nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
    struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
{
	struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
	    CTLFLAG_RD, &qpair->num_entries, 0,
	    "Number of entries in hardware queue");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
	    CTLFLAG_RD, &qpair->num_trackers, 0,
	    "Number of trackers pre-allocated for this queue pair");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
	    CTLFLAG_RD, &qpair->sq_head, 0,
	    "Current head of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
	    CTLFLAG_RD, &qpair->sq_tail, 0,
	    "Current tail of submission queue (as observed by driver)");
	SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
	    CTLFLAG_RD, &qpair->cq_head, 0,
	    "Current head of completion queue (as observed by driver)");

	/* Statistics counters; reset via the controller's reset_stats node. */
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
	    CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
	    CTLFLAG_RD, &qpair->num_intr_handler_calls,
	    "Number of times interrupt handler was invoked (will typically be "
	    "less than number of actual interrupts generated due to "
	    "coalescing)");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries",
	    CTLFLAG_RD, &qpair->num_retries, "Number of commands retried");
	SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures",
	    CTLFLAG_RD, &qpair->num_failures,
	    "Number of commands ending in failure after all retries");

	/* Write non-zero to dump this queue pair's rings to the console. */
	SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
	    "dump_debug", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    qpair, 0, nvme_sysctl_dump_debug, "IU", "Dump debug data");
}
284
/*
 * Register all sysctl nodes for a controller under its device sysctl
 * tree: tunables (int_coal_time, int_coal_threshold, timeout_period),
 * aggregate statistics summed across all queues, a reset_stats trigger,
 * and one sub-tree per queue pair ("adminq" plus "ioq0".."ioqN-1",
 * populated by nvme_sysctl_initialize_queue()).
 *
 * Called once per controller; the sysctl context belongs to the device,
 * so newbus tears everything down on detach.
 */
void
nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
{
	struct sysctl_ctx_list *ctrlr_ctx;
	struct sysctl_oid *ctrlr_tree, *que_tree;
	struct sysctl_oid_list *ctrlr_list;
#define QUEUE_NAME_LENGTH 16
	char queue_name[QUEUE_NAME_LENGTH];
	int i;

	ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
	ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
	ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);

	SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues",
	    CTLFLAG_RD, &ctrlr->num_io_queues, 0,
	    "Number of I/O queue pairs");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_int_coal_time, "IU",
	    "Interrupt coalescing timeout (in microseconds)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "int_coal_threshold",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ctrlr, 0,
	    nvme_sysctl_int_coal_threshold, "IU",
	    "Interrupt coalescing threshold");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "timeout_period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_timeout_period, "IU",
	    "Timeout period (in seconds)");

	/*
	 * Aggregate statistics, summed over the admin queue and all I/O
	 * queues by the respective handlers.
	 *
	 * NOTE(review): these nodes are CTLTYPE_S64 but pass "IU" (unsigned
	 * int) as the format string; "Q" is the conventional fmt for 64-bit
	 * values — confirm whether sysctl(8) output is affected.
	 */
	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_cmds", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_cmds, "IU",
	    "Number of commands submitted");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_intr_handler_calls",
	    CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
	    nvme_sysctl_num_intr_handler_calls, "IU",
	    "Number of times interrupt handler was invoked (will "
	    "typically be less than number of actual interrupts "
	    "generated due to coalescing)");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_retries", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_retries, "IU",
	    "Number of commands retried");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "num_failures", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    ctrlr, 0, nvme_sysctl_num_failures, "IU",
	    "Number of commands ending in failure after all retries");

	SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
	    "reset_stats", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, ctrlr,
	    0, nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");

	/* Per-queue sub-trees: the admin queue, then each I/O queue. */
	que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");

	nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);

	for (i = 0; i < ctrlr->num_io_queues; i++) {
		snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
		que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
		    queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue");
		nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
		    que_tree);
	}
}
Cache object: 6cad157084118e1b26377166f48025eb
|