FreeBSD/Linux Kernel Cross Reference
sys/dev/nvme/nvme_sysctl.c

    1 /*-
    2  * Copyright (C) 2012-2016 Intel Corporation
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD$");
   29 
   30 #include "opt_nvme.h"
   31 
   32 #include <sys/param.h>
   33 #include <sys/bus.h>
   34 #include <sys/sysctl.h>
   35 
   36 #include "nvme_private.h"
   37 
   38 #ifndef NVME_USE_NVD
   39 #define NVME_USE_NVD 1
   40 #endif
   41 
   42 int nvme_use_nvd = NVME_USE_NVD;
   43 
   44 SYSCTL_NODE(_hw, OID_AUTO, nvme, CTLFLAG_RD, 0, "NVMe sysctl tunables");
   45 SYSCTL_INT(_hw_nvme, OID_AUTO, use_nvd, CTLFLAG_RDTUN,
   46     &nvme_use_nvd, 1, "1 = Create NVD devices, 0 = Create NDA devices");
   47 
   48 /*
   49  * CTLTYPE_S64 and sysctl_handle_64 were added in r217616.  Define these
   50  *  explicitly here for older kernels that don't include the r217616
   51  *  changeset.
   52  */
   53 #ifndef CTLTYPE_S64
   54 #define CTLTYPE_S64             CTLTYPE_QUAD
   55 #define sysctl_handle_64        sysctl_handle_quad
   56 #endif
   57 
   58 static void
   59 nvme_dump_queue(struct nvme_qpair *qpair)
   60 {
   61         struct nvme_completion *cpl;
   62         struct nvme_command *cmd;
   63         int i;
   64 
   65         printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);
   66 
   67         printf("Completion queue:\n");
   68         for (i = 0; i < qpair->num_entries; i++) {
   69                 cpl = &qpair->cpl[i];
   70                 printf("%05d: ", i);
   71                 nvme_dump_completion(cpl);
   72         }
   73 
   74         printf("Submission queue:\n");
   75         for (i = 0; i < qpair->num_entries; i++) {
   76                 cmd = &qpair->cmd[i];
   77                 printf("%05d: ", i);
   78                 nvme_dump_command(cmd);
   79         }
   80 }
   81 
   82 
   83 static int
   84 nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
   85 {
   86         struct nvme_qpair       *qpair = arg1;
   87         uint32_t                val = 0;
   88 
   89         int error = sysctl_handle_int(oidp, &val, 0, req);
   90 
   91         if (error)
   92                 return (error);
   93 
   94         if (val != 0)
   95                 nvme_dump_queue(qpair);
   96 
   97         return (0);
   98 }
   99 
  100 static int
  101 nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
  102 {
  103         struct nvme_controller *ctrlr = arg1;
  104         uint32_t oldval = ctrlr->int_coal_time;
  105         int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
  106             req);
  107 
  108         if (error)
  109                 return (error);
  110 
  111         if (oldval != ctrlr->int_coal_time)
  112                 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
  113                     ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
  114                     NULL);
  115 
  116         return (0);
  117 }
  118 
  119 static int
  120 nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
  121 {
  122         struct nvme_controller *ctrlr = arg1;
  123         uint32_t oldval = ctrlr->int_coal_threshold;
  124         int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
  125             req);
  126 
  127         if (error)
  128                 return (error);
  129 
  130         if (oldval != ctrlr->int_coal_threshold)
  131                 nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
  132                     ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
  133                     NULL);
  134 
  135         return (0);
  136 }
  137 
  138 static int
  139 nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
  140 {
  141         struct nvme_controller *ctrlr = arg1;
  142         uint32_t oldval = ctrlr->timeout_period;
  143         int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);
  144 
  145         if (error)
  146                 return (error);
  147 
  148         if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
  149             ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
  150                 ctrlr->timeout_period = oldval;
  151                 return (EINVAL);
  152         }
  153 
  154         return (0);
  155 }
  156 
  157 static void
  158 nvme_qpair_reset_stats(struct nvme_qpair *qpair)
  159 {
  160 
  161         qpair->num_cmds = 0;
  162         qpair->num_intr_handler_calls = 0;
  163 }
  164 
  165 static int
  166 nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
  167 {
  168         struct nvme_controller  *ctrlr = arg1;
  169         int64_t                 num_cmds = 0;
  170         int                     i;
  171 
  172         num_cmds = ctrlr->adminq.num_cmds;
  173 
  174         for (i = 0; i < ctrlr->num_io_queues; i++)
  175                 num_cmds += ctrlr->ioq[i].num_cmds;
  176 
  177         return (sysctl_handle_64(oidp, &num_cmds, 0, req));
  178 }
  179 
  180 static int
  181 nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
  182 {
  183         struct nvme_controller  *ctrlr = arg1;
  184         int64_t                 num_intr_handler_calls = 0;
  185         int                     i;
  186 
  187         num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
  188 
  189         for (i = 0; i < ctrlr->num_io_queues; i++)
  190                 num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
  191 
  192         return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
  193 }
  194 
  195 static int
  196 nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
  197 {
  198         struct nvme_controller  *ctrlr = arg1;
  199         uint32_t                i, val = 0;
  200 
  201         int error = sysctl_handle_int(oidp, &val, 0, req);
  202 
  203         if (error)
  204                 return (error);
  205 
  206         if (val != 0) {
  207                 nvme_qpair_reset_stats(&ctrlr->adminq);
  208 
  209                 for (i = 0; i < ctrlr->num_io_queues; i++)
  210                         nvme_qpair_reset_stats(&ctrlr->ioq[i]);
  211         }
  212 
  213         return (0);
  214 }
  215 
  216 
  217 static void
  218 nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
  219     struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
  220 {
  221         struct sysctl_oid_list  *que_list = SYSCTL_CHILDREN(que_tree);
  222 
  223         SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
  224             CTLFLAG_RD, &qpair->num_entries, 0,
  225             "Number of entries in hardware queue");
  226         SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
  227             CTLFLAG_RD, &qpair->num_trackers, 0,
  228             "Number of trackers pre-allocated for this queue pair");
  229         SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
  230             CTLFLAG_RD, &qpair->sq_head, 0,
  231             "Current head of submission queue (as observed by driver)");
  232         SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
  233             CTLFLAG_RD, &qpair->sq_tail, 0,
  234             "Current tail of submission queue (as observed by driver)");
  235         SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
  236             CTLFLAG_RD, &qpair->cq_head, 0,
  237             "Current head of completion queue (as observed by driver)");
  238 
  239         SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
  240             CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
  241         SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
  242             CTLFLAG_RD, &qpair->num_intr_handler_calls,
  243             "Number of times interrupt handler was invoked (will typically be "
  244             "less than number of actual interrupts generated due to "
  245             "coalescing)");
  246 
  247         SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
  248             "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
  249             nvme_sysctl_dump_debug, "IU", "Dump debug data");
  250 }
  251 
  252 void
  253 nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
  254 {
  255         struct sysctl_ctx_list  *ctrlr_ctx;
  256         struct sysctl_oid       *ctrlr_tree, *que_tree;
  257         struct sysctl_oid_list  *ctrlr_list;
  258 #define QUEUE_NAME_LENGTH       16
  259         char                    queue_name[QUEUE_NAME_LENGTH];
  260         int                     i;
  261 
  262         ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
  263         ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
  264         ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
  265 
  266         SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cpus_per_ioq",
  267             CTLFLAG_RD, &ctrlr->num_cpus_per_ioq, 0,
  268             "Number of CPUs assigned per I/O queue pair");
  269 
  270         SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
  271             "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
  272             nvme_sysctl_int_coal_time, "IU",
  273             "Interrupt coalescing timeout (in microseconds)");
  274 
  275         SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
  276             "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
  277             nvme_sysctl_int_coal_threshold, "IU",
  278             "Interrupt coalescing threshold");
  279 
  280         SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
  281             "timeout_period", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
  282             nvme_sysctl_timeout_period, "IU",
  283             "Timeout period (in seconds)");
  284 
  285         SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
  286             "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
  287             ctrlr, 0, nvme_sysctl_num_cmds, "IU",
  288             "Number of commands submitted");
  289 
  290         SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
  291             "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
  292             ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
  293             "Number of times interrupt handler was invoked (will "
  294             "typically be less than number of actual interrupts "
  295             "generated due to coalescing)");
  296 
  297         SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
  298             "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
  299             nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
  300 
  301         que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
  302             CTLFLAG_RD, NULL, "Admin Queue");
  303 
  304         nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
  305 
  306         for (i = 0; i < ctrlr->num_io_queues; i++) {
  307                 snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
  308                 que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
  309                     queue_name, CTLFLAG_RD, NULL, "IO Queue");
  310                 nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
  311                     que_tree);
  312         }
  313 }
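
The sysctls registered above are consumed from userland through the usual sysctl(3) interface: hw.nvme.use_nvd is a boot-time tunable, and the per-controller OIDs are attached to the device sysctl tree, which by the standard dev.<driver>.<unit> convention places them under dev.nvme.<unit>. Below is a minimal userland sketch (not part of the file above) that reads the tunable and reads and writes the timeout_period OID with sysctlbyname(3); the controller unit number 0 and the dev.nvme.0 prefix are assumptions for illustration.

    /*
     * Sketch: query hw.nvme.use_nvd and the per-controller
     * timeout_period OID registered by nvme_sysctl.c.  The
     * "dev.nvme.0." prefix assumes controller unit 0.
     */
    #include <sys/types.h>
    #include <sys/sysctl.h>

    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
            int use_nvd;
            u_int timeout;
            size_t len;

            /* Read-only tunable (CTLFLAG_RDTUN). */
            len = sizeof(use_nvd);
            if (sysctlbyname("hw.nvme.use_nvd", &use_nvd, &len, NULL, 0) == -1)
                    err(1, "hw.nvme.use_nvd");
            printf("use_nvd: %d\n", use_nvd);

            /* Read the current per-controller timeout. */
            len = sizeof(timeout);
            if (sysctlbyname("dev.nvme.0.timeout_period", &timeout, &len,
                NULL, 0) == -1)
                    err(1, "dev.nvme.0.timeout_period");
            printf("timeout_period: %u seconds\n", timeout);

            /*
             * Write a new timeout; example value only.  The handler
             * rejects values outside NVME_MIN/MAX_TIMEOUT_PERIOD with
             * EINVAL and restores the previous setting.
             */
            timeout = 30;
            if (sysctlbyname("dev.nvme.0.timeout_period", NULL, NULL,
                &timeout, sizeof(timeout)) == -1)
                    warn("set dev.nvme.0.timeout_period");

            return (0);
    }

As the handlers above show, writes to int_coal_time and int_coal_threshold work the same way, except that a changed value also triggers nvme_ctrlr_cmd_set_interrupt_coalescing() to push the new coalescing settings to the controller.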

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.