FreeBSD/Linux Kernel Cross Reference
sys/cam/cam_iosched.c


    1 /*-
    2  * CAM IO Scheduler Interface
    3  *
    4  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    5  *
    6  * Copyright (c) 2015 Netflix, Inc.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions, and the following disclaimer,
   13  *    without modification, immediately at the beginning of the file.
   14  * 2. The name of the author may not be used to endorse or promote products
   15  *    derived from this software without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
   21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD: releng/12.0/sys/cam/cam_iosched.c 337824 2018-08-15 00:15:40Z imp $
   30  */
   31 
   32 #include "opt_cam.h"
   33 #include "opt_ddb.h"
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/12.0/sys/cam/cam_iosched.c 337824 2018-08-15 00:15:40Z imp $");
   37 
   38 #include <sys/param.h>
   39 
   40 #include <sys/systm.h>
   41 #include <sys/kernel.h>
   42 #include <sys/bio.h>
   43 #include <sys/lock.h>
   44 #include <sys/malloc.h>
   45 #include <sys/mutex.h>
   46 #include <sys/sbuf.h>
   47 #include <sys/sysctl.h>
   48 
   49 #include <cam/cam.h>
   50 #include <cam/cam_ccb.h>
   51 #include <cam/cam_periph.h>
   52 #include <cam/cam_xpt_periph.h>
   53 #include <cam/cam_xpt_internal.h>
   54 #include <cam/cam_iosched.h>
   55 
   56 #include <ddb/ddb.h>
   57 
   58 static MALLOC_DEFINE(M_CAMSCHED, "CAM I/O Scheduler",
   59     "CAM I/O Scheduler buffers");
   60 
   61 /*
    62  * Default I/O scheduler for FreeBSD. This implementation is just a thin veneer
   63  * over the bioq_* interface, with notions of separate calls for normal I/O and
   64  * for trims.
   65  *
   66  * When CAM_IOSCHED_DYNAMIC is defined, the scheduler is enhanced to dynamically
    67  * steer the rate of one type of traffic to help other types of traffic (e.g.,
   68  * limit writes when read latency deteriorates on SSDs).
   69  */
   70 
   71 #ifdef CAM_IOSCHED_DYNAMIC
   72 
   73 static int do_dynamic_iosched = 1;
   74 TUNABLE_INT("kern.cam.do_dynamic_iosched", &do_dynamic_iosched);
   75 SYSCTL_INT(_kern_cam, OID_AUTO, do_dynamic_iosched, CTLFLAG_RD,
   76     &do_dynamic_iosched, 1,
   77     "Enable Dynamic I/O scheduler optimizations.");
   78 
   79 /*
   80  * For an EMA, with an alpha of alpha, we know
   81  *      alpha = 2 / (N + 1)
   82  * or
   83  *      N = 1 + (2 / alpha)
   84  * where N is the number of samples that 86% of the current
   85  * EMA is derived from.
   86  *
   87  * So we invent[*] alpha_bits:
   88  *      alpha_bits = -log_2(alpha)
   89  *      alpha = 2^-alpha_bits
   90  * So
   91  *      N = 1 + 2^(alpha_bits + 1)
   92  *
    93  * The default of 9 gives a lookback of 1025 samples for 86% of the data.
   94  * For a brief intro: https://en.wikipedia.org/wiki/Moving_average
   95  *
    96  * [*] Stolen from the load average code and many other places.
   97  * Note: See computation of EMA and EMVAR for acceptable ranges of alpha.
   98  */
   99 static int alpha_bits = 9;
  100 TUNABLE_INT("kern.cam.iosched_alpha_bits", &alpha_bits);
  101 SYSCTL_INT(_kern_cam, OID_AUTO, iosched_alpha_bits, CTLFLAG_RW,
  102     &alpha_bits, 1,
  103     "Bits in EMA's alpha.");
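
/*
 * Illustrative sketch only (not part of the original source): the fixed-point
 * update implied by the comment above, ema' = ema + alpha * (sample - ema)
 * with alpha = 2^-alpha_bits, using a shift in place of the multiply. The
 * function name and the use of int64_t are hypothetical; the driver's real
 * update operates on sbintime_t latencies later in this file. Like that code,
 * it relies on arithmetic right shift when sample < ema.
 */
static inline int64_t
example_ema_update(int64_t ema, int64_t sample, int bits)
{

        return (ema + ((sample - ema) >> bits));
}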
  104 
  105 struct iop_stats;
  106 struct cam_iosched_softc;
  107 
  108 int iosched_debug = 0;
  109 
  110 typedef enum {
  111         none = 0,                               /* No limits */
  112         queue_depth,                    /* Limit how many ops we queue to SIM */
  113         iops,                           /* Limit # of IOPS to the drive */
  114         bandwidth,                      /* Limit bandwidth to the drive */
  115         limiter_max
  116 } io_limiter;
  117 
  118 static const char *cam_iosched_limiter_names[] =
  119     { "none", "queue_depth", "iops", "bandwidth" };
  120 
  121 /*
  122  * Called to initialize the bits of the iop_stats structure relevant to the
  123  * limiter. Called just after the limiter is set.
  124  */
  125 typedef int l_init_t(struct iop_stats *);
  126 
  127 /*
  128  * Called every tick.
  129  */
  130 typedef int l_tick_t(struct iop_stats *);
  131 
  132 /*
  133  * Called to see if the limiter thinks this IOP can be allowed to
  134  * proceed. If so, the limiter assumes that the IOP proceeded
  135  * and makes any accounting of it that's needed.
  136  */
  137 typedef int l_iop_t(struct iop_stats *, struct bio *);
  138 
  139 /*
  140  * Called when an I/O completes so the limiter can update its
  141  * accounting. Pending I/Os may complete in any order (even when
  142  * sent to the hardware at the same time), so the limiter may not
  143  * make any assumptions other than this I/O has completed. If it
  144  * returns 1, then xpt_schedule() needs to be called again.
  145  */
  146 typedef int l_iodone_t(struct iop_stats *, struct bio *);
  147 
  148 static l_iop_t cam_iosched_qd_iop;
  149 static l_iop_t cam_iosched_qd_caniop;
  150 static l_iodone_t cam_iosched_qd_iodone;
  151 
  152 static l_init_t cam_iosched_iops_init;
  153 static l_tick_t cam_iosched_iops_tick;
  154 static l_iop_t cam_iosched_iops_caniop;
  155 static l_iop_t cam_iosched_iops_iop;
  156 
  157 static l_init_t cam_iosched_bw_init;
  158 static l_tick_t cam_iosched_bw_tick;
  159 static l_iop_t cam_iosched_bw_caniop;
  160 static l_iop_t cam_iosched_bw_iop;
  161 
  162 struct limswitch {
  163         l_init_t        *l_init;
  164         l_tick_t        *l_tick;
  165         l_iop_t         *l_iop;
  166         l_iop_t         *l_caniop;
  167         l_iodone_t      *l_iodone;
  168 } limsw[] =
  169 {
  170         {       /* none */
  171                 .l_init = NULL,
  172                 .l_tick = NULL,
  173                 .l_iop = NULL,
  174                 .l_iodone= NULL,
  175         },
  176         {       /* queue_depth */
  177                 .l_init = NULL,
  178                 .l_tick = NULL,
  179                 .l_caniop = cam_iosched_qd_caniop,
  180                 .l_iop = cam_iosched_qd_iop,
  181                 .l_iodone= cam_iosched_qd_iodone,
  182         },
  183         {       /* iops */
  184                 .l_init = cam_iosched_iops_init,
  185                 .l_tick = cam_iosched_iops_tick,
  186                 .l_caniop = cam_iosched_iops_caniop,
  187                 .l_iop = cam_iosched_iops_iop,
  188                 .l_iodone= NULL,
  189         },
  190         {       /* bandwidth */
  191                 .l_init = cam_iosched_bw_init,
  192                 .l_tick = cam_iosched_bw_tick,
  193                 .l_caniop = cam_iosched_bw_caniop,
  194                 .l_iop = cam_iosched_bw_iop,
  195                 .l_iodone= NULL,
  196         },
  197 };
  198 
  199 struct iop_stats {
  200         /*
  201          * sysctl state for this subnode.
  202          */
  203         struct sysctl_ctx_list  sysctl_ctx;
  204         struct sysctl_oid       *sysctl_tree;
  205 
  206         /*
  207          * Information about the current rate limiters, if any
  208          */
  209         io_limiter      limiter;        /* How are I/Os being limited */
  210         int             min;            /* Low range of limit */
  211         int             max;            /* High range of limit */
  212         int             current;        /* Current rate limiter */
  213         int             l_value1;       /* per-limiter scratch value 1. */
  214         int             l_value2;       /* per-limiter scratch value 2. */
  215 
  216         /*
  217          * Debug information about counts of I/Os that have gone through the
  218          * scheduler.
  219          */
  220         int             pending;        /* I/Os pending in the hardware */
  221         int             queued;         /* number currently in the queue */
  222         int             total;          /* Total for all time -- wraps */
  223         int             in;             /* number queued all time -- wraps */
  224         int             out;            /* number completed all time -- wraps */
  225         int             errs;           /* Number of I/Os completed with error --  wraps */
  226 
  227         /*
  228          * Statistics on different bits of the process.
  229          */
  230                 /* Exp Moving Average, see alpha_bits for more details */
  231         sbintime_t      ema;
  232         sbintime_t      emvar;
  233         sbintime_t      sd;             /* Last computed sd */
  234 
  235         uint32_t        state_flags;
  236 #define IOP_RATE_LIMITED                1u
  237 
   238 #define LAT_BUCKETS 15                  /* < 1ms < 2ms ... < 2^(n-1)ms >= 2^(n-1)ms */
  239         uint64_t        latencies[LAT_BUCKETS];
  240 
  241         struct cam_iosched_softc *softc;
  242 };
  243 
  244 
  245 typedef enum {
  246         set_max = 0,                    /* current = max */
  247         read_latency,                   /* Steer read latency by throttling writes */
  248         cl_max                          /* Keep last */
  249 } control_type;
  250 
  251 static const char *cam_iosched_control_type_names[] =
  252     { "set_max", "read_latency" };
  253 
  254 struct control_loop {
  255         /*
  256          * sysctl state for this subnode.
  257          */
  258         struct sysctl_ctx_list  sysctl_ctx;
  259         struct sysctl_oid       *sysctl_tree;
  260 
  261         sbintime_t      next_steer;             /* Time of next steer */
  262         sbintime_t      steer_interval;         /* How often do we steer? */
  263         sbintime_t      lolat;
  264         sbintime_t      hilat;
  265         int             alpha;
  266         control_type    type;                   /* What type of control? */
  267         int             last_count;             /* Last I/O count */
  268 
  269         struct cam_iosched_softc *softc;
  270 };
  271 
  272 #endif
  273 
  274 struct cam_iosched_softc {
  275         struct bio_queue_head bio_queue;
  276         struct bio_queue_head trim_queue;
  277                                 /* scheduler flags < 16, user flags >= 16 */
  278         uint32_t        flags;
  279         int             sort_io_queue;
  280 #ifdef CAM_IOSCHED_DYNAMIC
  281         int             read_bias;              /* Read bias setting */
  282         int             current_read_bias;      /* Current read bias state */
  283         int             total_ticks;
  284         int             load;                   /* EMA of 'load average' of disk / 2^16 */
  285 
  286         struct bio_queue_head write_queue;
  287         struct iop_stats read_stats, write_stats, trim_stats;
  288         struct sysctl_ctx_list  sysctl_ctx;
  289         struct sysctl_oid       *sysctl_tree;
  290 
  291         int             quanta;                 /* Number of quanta per second */
  292         struct callout  ticker;                 /* Callout for our quota system */
  293         struct cam_periph *periph;              /* cam periph associated with this device */
   294         uint32_t        this_frac;              /* Fraction of a second (in 2^-16 units) for this tick */
  295         sbintime_t      last_time;              /* Last time we ticked */
  296         struct control_loop cl;
  297 #endif
  298 };
  299 
  300 #ifdef CAM_IOSCHED_DYNAMIC
  301 /*
  302  * helper functions to call the limsw functions.
  303  */
  304 static int
  305 cam_iosched_limiter_init(struct iop_stats *ios)
  306 {
  307         int lim = ios->limiter;
  308 
  309         /* maybe this should be a kassert */
  310         if (lim < none || lim >= limiter_max)
  311                 return EINVAL;
  312 
  313         if (limsw[lim].l_init)
  314                 return limsw[lim].l_init(ios);
  315 
  316         return 0;
  317 }
  318 
  319 static int
  320 cam_iosched_limiter_tick(struct iop_stats *ios)
  321 {
  322         int lim = ios->limiter;
  323 
  324         /* maybe this should be a kassert */
  325         if (lim < none || lim >= limiter_max)
  326                 return EINVAL;
  327 
  328         if (limsw[lim].l_tick)
  329                 return limsw[lim].l_tick(ios);
  330 
  331         return 0;
  332 }
  333 
  334 static int
  335 cam_iosched_limiter_iop(struct iop_stats *ios, struct bio *bp)
  336 {
  337         int lim = ios->limiter;
  338 
  339         /* maybe this should be a kassert */
  340         if (lim < none || lim >= limiter_max)
  341                 return EINVAL;
  342 
  343         if (limsw[lim].l_iop)
  344                 return limsw[lim].l_iop(ios, bp);
  345 
  346         return 0;
  347 }
  348 
  349 static int
  350 cam_iosched_limiter_caniop(struct iop_stats *ios, struct bio *bp)
  351 {
  352         int lim = ios->limiter;
  353 
  354         /* maybe this should be a kassert */
  355         if (lim < none || lim >= limiter_max)
  356                 return EINVAL;
  357 
  358         if (limsw[lim].l_caniop)
  359                 return limsw[lim].l_caniop(ios, bp);
  360 
  361         return 0;
  362 }
  363 
  364 static int
  365 cam_iosched_limiter_iodone(struct iop_stats *ios, struct bio *bp)
  366 {
  367         int lim = ios->limiter;
  368 
  369         /* maybe this should be a kassert */
  370         if (lim < none || lim >= limiter_max)
  371                 return 0;
  372 
  373         if (limsw[lim].l_iodone)
  374                 return limsw[lim].l_iodone(ios, bp);
  375 
  376         return 0;
  377 }
  378 
  379 /*
  380  * Functions to implement the different kinds of limiters
  381  */
  382 
  383 static int
  384 cam_iosched_qd_iop(struct iop_stats *ios, struct bio *bp)
  385 {
  386 
  387         if (ios->current <= 0 || ios->pending < ios->current)
  388                 return 0;
  389 
  390         return EAGAIN;
  391 }
  392 
  393 static int
  394 cam_iosched_qd_caniop(struct iop_stats *ios, struct bio *bp)
  395 {
  396 
  397         if (ios->current <= 0 || ios->pending < ios->current)
  398                 return 0;
  399 
  400         return EAGAIN;
  401 }
  402 
  403 static int
  404 cam_iosched_qd_iodone(struct iop_stats *ios, struct bio *bp)
  405 {
  406 
  407         if (ios->current <= 0 || ios->pending != ios->current)
  408                 return 0;
  409 
  410         return 1;
  411 }
  412 
  413 static int
  414 cam_iosched_iops_init(struct iop_stats *ios)
  415 {
  416 
  417         ios->l_value1 = ios->current / ios->softc->quanta;
  418         if (ios->l_value1 <= 0)
  419                 ios->l_value1 = 1;
  420         ios->l_value2 = 0;
  421 
  422         return 0;
  423 }
  424 
  425 static int
  426 cam_iosched_iops_tick(struct iop_stats *ios)
  427 {
  428         int new_ios;
  429 
  430         /*
  431          * Allow at least one IO per tick until all
  432          * the IOs for this interval have been spent.
  433          */
  434         new_ios = (int)((ios->current * (uint64_t)ios->softc->this_frac) >> 16);
  435         if (new_ios < 1 && ios->l_value2 < ios->current) {
  436                 new_ios = 1;
  437                 ios->l_value2++;
  438         }
  439 
  440         /*
  441          * If this a new accounting interval, discard any "unspent" ios
  442          * granted in the previous interval.  Otherwise add the new ios to
  443          * the previously granted ones that haven't been spent yet.
  444          */
  445         if ((ios->softc->total_ticks % ios->softc->quanta) == 0) {
  446                 ios->l_value1 = new_ios;
  447                 ios->l_value2 = 1;
  448         } else {
  449                 ios->l_value1 += new_ios;
  450         }
  451 
  452 
  453         return 0;
  454 }
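
/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * quanta = 200 and a 5ms tick, this_frac is roughly 65536 / 200 = 327, so a
 * limit of ios->current = 10000 IOPS yields new_ios = (10000 * 327) >> 16,
 * about 49 I/Os granted per tick, or roughly 9800-10000 per second once the
 * one-I/O-per-tick floor and the per-interval reset are taken into account.
 */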
  455 
  456 static int
  457 cam_iosched_iops_caniop(struct iop_stats *ios, struct bio *bp)
  458 {
  459 
  460         /*
  461          * So if we have any more IOPs left, allow it,
  462          * otherwise wait. If current iops is 0, treat that
  463          * as unlimited as a failsafe.
  464          */
  465         if (ios->current > 0 && ios->l_value1 <= 0)
  466                 return EAGAIN;
  467         return 0;
  468 }
  469 
  470 static int
  471 cam_iosched_iops_iop(struct iop_stats *ios, struct bio *bp)
  472 {
  473         int rv;
  474 
  475         rv = cam_iosched_limiter_caniop(ios, bp);
  476         if (rv == 0)
  477                 ios->l_value1--;
  478 
  479         return rv;
  480 }
  481 
  482 static int
  483 cam_iosched_bw_init(struct iop_stats *ios)
  484 {
  485 
  486         /* ios->current is in kB/s, so scale to bytes */
  487         ios->l_value1 = ios->current * 1000 / ios->softc->quanta;
  488 
  489         return 0;
  490 }
  491 
  492 static int
  493 cam_iosched_bw_tick(struct iop_stats *ios)
  494 {
  495         int bw;
  496 
  497         /*
  498          * If we're in the hole for available quota from
  499          * the last time, then add the quantum for this.
  500          * If we have any left over from last quantum,
  501          * then too bad, that's lost. Also, ios->current
  502          * is in kB/s, so scale.
  503          *
  504          * We also allow up to 4 quanta of credits to
  505          * accumulate to deal with burstiness. 4 is extremely
  506          * arbitrary.
  507          */
  508         bw = (int)((ios->current * 1000ull * (uint64_t)ios->softc->this_frac) >> 16);
  509         if (ios->l_value1 < bw * 4)
  510                 ios->l_value1 += bw;
  511 
  512         return 0;
  513 }
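
/*
 * Worked example of the scaling above (illustrative numbers): ios->current is
 * in kB/s, so a 100000 kB/s limit with this_frac ~= 327 (quanta = 200) grants
 * bw = (100000 * 1000 * 327) >> 16, about 499000 bytes per 5ms tick, i.e.
 * roughly 100 MB/s overall, with up to about four ticks' worth of unspent
 * credit carried forward.
 */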
  514 
  515 static int
  516 cam_iosched_bw_caniop(struct iop_stats *ios, struct bio *bp)
  517 {
  518         /*
  519          * So if we have any more bw quota left, allow it,
  520          * otherwise wait. Note, we'll go negative and that's
  521          * OK. We'll just get a little less next quota.
  522          *
  523          * Note on going negative: that allows us to process
  524          * requests in order better, since we won't allow
  525          * shorter reads to get around the long one that we
  526          * don't have the quota to do just yet. It also prevents
  527          * starvation by being a little more permissive about
  528          * what we let through this quantum (to prevent the
  529          * starvation), at the cost of getting a little less
  530          * next quantum.
  531          *
  532          * Also note that if the current limit is <= 0,
  533          * we treat it as unlimited as a failsafe.
  534          */
  535         if (ios->current > 0 && ios->l_value1 <= 0)
  536                 return EAGAIN;
  537 
  538 
  539         return 0;
  540 }
  541 
  542 static int
  543 cam_iosched_bw_iop(struct iop_stats *ios, struct bio *bp)
  544 {
  545         int rv;
  546 
  547         rv = cam_iosched_limiter_caniop(ios, bp);
  548         if (rv == 0)
  549                 ios->l_value1 -= bp->bio_length;
  550 
  551         return rv;
  552 }
  553 
  554 static void cam_iosched_cl_maybe_steer(struct control_loop *clp);
  555 
  556 static void
  557 cam_iosched_ticker(void *arg)
  558 {
  559         struct cam_iosched_softc *isc = arg;
  560         sbintime_t now, delta;
  561         int pending;
  562 
  563         callout_reset(&isc->ticker, hz / isc->quanta, cam_iosched_ticker, isc);
  564 
  565         now = sbinuptime();
  566         delta = now - isc->last_time;
   567         isc->this_frac = (uint32_t)delta >> 16;         /* Note: discards seconds -- should be 0; harmless if not */
  568         isc->last_time = now;
  569 
  570         cam_iosched_cl_maybe_steer(&isc->cl);
  571 
  572         cam_iosched_limiter_tick(&isc->read_stats);
  573         cam_iosched_limiter_tick(&isc->write_stats);
  574         cam_iosched_limiter_tick(&isc->trim_stats);
  575 
  576         cam_iosched_schedule(isc, isc->periph);
  577 
  578         /*
  579          * isc->load is an EMA of the pending I/Os at each tick. The number of
  580          * pending I/Os is the sum of the I/Os queued to the hardware, and those
  581          * in the software queue that could be queued to the hardware if there
  582          * were slots.
  583          *
  584          * ios_stats.pending is a count of requests in the SIM right now for
  585          * each of these types of I/O. So the total pending count is the sum of
  586          * these I/Os and the sum of the queued I/Os still in the software queue
  587          * for those operations that aren't being rate limited at the moment.
  588          *
  589          * The reason for the rate limiting bit is because those I/Os
  590          * aren't part of the software queued load (since we could
  591          * give them to hardware, but choose not to).
  592          *
  593          * Note: due to a bug in counting pending TRIM in the device, we
  594          * don't include them in this count. We count each BIO_DELETE in
  595          * the pending count, but the periph drivers collapse them down
  596          * into one TRIM command. That one trim command gets the completion
  597          * so the counts get off.
  598          */
  599         pending = isc->read_stats.pending + isc->write_stats.pending /* + isc->trim_stats.pending */;
  600         pending += !!(isc->read_stats.state_flags & IOP_RATE_LIMITED) * isc->read_stats.queued +
  601             !!(isc->write_stats.state_flags & IOP_RATE_LIMITED) * isc->write_stats.queued /* +
  602             !!(isc->trim_stats.state_flags & IOP_RATE_LIMITED) * isc->trim_stats.queued */ ;
  603         pending <<= 16;
  604         pending /= isc->periph->path->device->ccbq.total_openings;
  605 
   606         isc->load = (pending + (isc->load << 13) - isc->load) >> 13; /* see above: 13 -> N = 1 + 2^14 = 16385; at 200 ticks/s that's ~82s of lookback */
  607 
  608         isc->total_ticks++;
  609 }
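
/*
 * Worked example of the load computation above (illustrative numbers): with
 * 4 I/Os outstanding against 32 total openings, pending becomes
 * (4 << 16) / 32 = 8192, i.e. 0.125 in 16-bit fixed point, and isc->load
 * tracks an EMA of that value using the 13-bit shift described in the
 * comment.
 */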
  610 
  611 
  612 static void
  613 cam_iosched_cl_init(struct control_loop *clp, struct cam_iosched_softc *isc)
  614 {
  615 
  616         clp->next_steer = sbinuptime();
  617         clp->softc = isc;
  618         clp->steer_interval = SBT_1S * 5;       /* Let's start out steering every 5s */
  619         clp->lolat = 5 * SBT_1MS;
  620         clp->hilat = 15 * SBT_1MS;
  621         clp->alpha = 20;                        /* Alpha == gain. 20 = .2 */
  622         clp->type = set_max;
  623 }
  624 
  625 static void
  626 cam_iosched_cl_maybe_steer(struct control_loop *clp)
  627 {
  628         struct cam_iosched_softc *isc;
  629         sbintime_t now, lat;
  630         int old;
  631 
  632         isc = clp->softc;
  633         now = isc->last_time;
  634         if (now < clp->next_steer)
  635                 return;
  636 
  637         clp->next_steer = now + clp->steer_interval;
  638         switch (clp->type) {
  639         case set_max:
  640                 if (isc->write_stats.current != isc->write_stats.max)
  641                         printf("Steering write from %d kBps to %d kBps\n",
  642                             isc->write_stats.current, isc->write_stats.max);
  643                 isc->read_stats.current = isc->read_stats.max;
  644                 isc->write_stats.current = isc->write_stats.max;
  645                 isc->trim_stats.current = isc->trim_stats.max;
  646                 break;
  647         case read_latency:
  648                 old = isc->write_stats.current;
  649                 lat = isc->read_stats.ema;
  650                 /*
   651                  * Simple PLL-like engine. Since we're steering to a range for
   652                  * the SP (set point), things are a little more complicated. In
   653                  * addition, we're not directly controlling our PV (process
   654                  * variable), the read latency, but instead are manipulating the
   655                  * write bandwidth limit as our MV (manipulation variable), so
   656                  * analysis of this code gets a bit messy. Also, the MV is a very
   657                  * noisy control surface for read
  658                  * latency since it is affected by many hidden processes inside
  659                  * the device which change how responsive read latency will be
  660                  * in reaction to changes in write bandwidth. Unlike the classic
   661                  * boiler control PLL, this may result in over-steering while
  662                  * the SSD takes its time to react to the new, lower load. This
  663                  * is why we use a relatively low alpha of between .1 and .25 to
  664                  * compensate for this effect. At .1, it takes ~22 steering
  665                  * intervals to back off by a factor of 10. At .2 it only takes
   666                  * ~10. At .25 it only takes ~8. However, some preliminary data
   667                  * from the SSD drives suggests a response time in the tens of
  668                  * seconds before latency drops regardless of the new write
  669                  * rate. Careful observation will be required to tune this
  670                  * effectively.
  671                  *
  672                  * Also, when there's no read traffic, we jack up the write
  673                  * limit too regardless of the last read latency.  10 is
  674                  * somewhat arbitrary.
  675                  */
  676                 if (lat < clp->lolat || isc->read_stats.total - clp->last_count < 10)
  677                         isc->write_stats.current = isc->write_stats.current *
  678                             (100 + clp->alpha) / 100;   /* Scale up */
  679                 else if (lat > clp->hilat)
  680                         isc->write_stats.current = isc->write_stats.current *
  681                             (100 - clp->alpha) / 100;   /* Scale down */
  682                 clp->last_count = isc->read_stats.total;
  683 
  684                 /*
  685                  * Even if we don't steer, per se, enforce the min/max limits as
  686                  * those may have changed.
  687                  */
  688                 if (isc->write_stats.current < isc->write_stats.min)
  689                         isc->write_stats.current = isc->write_stats.min;
  690                 if (isc->write_stats.current > isc->write_stats.max)
  691                         isc->write_stats.current = isc->write_stats.max;
  692                 if (old != isc->write_stats.current &&  iosched_debug)
  693                         printf("Steering write from %d kBps to %d kBps due to latency of %jdus\n",
  694                             old, isc->write_stats.current,
  695                             (uintmax_t)((uint64_t)1000000 * (uint32_t)lat) >> 32);
  696                 break;
  697         case cl_max:
  698                 break;
  699         }
  700 }
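
/*
 * Where the interval counts in the comment above come from: each steer
 * multiplies the write limit by (100 - alpha)/100, so backing off by a factor
 * of 10 takes n = ln(0.1) / ln(1 - alpha/100) steering intervals, which is
 * ~22 at alpha = 10, ~10 at alpha = 20 and ~8 at alpha = 25.
 */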
  701 #endif
  702 
  703 /*
  704  * Trim or similar currently pending completion. Should only be set for
  705  * those drivers wishing only one Trim active at a time.
  706  */
  707 #define CAM_IOSCHED_FLAG_TRIM_ACTIVE    (1ul << 0)
  708                         /* Callout active, and needs to be torn down */
  709 #define CAM_IOSCHED_FLAG_CALLOUT_ACTIVE (1ul << 1)
  710 
  711                         /* Periph drivers set these flags to indicate work */
  712 #define CAM_IOSCHED_FLAG_WORK_FLAGS     ((0xffffu) << 16)
  713 
  714 #ifdef CAM_IOSCHED_DYNAMIC
  715 static void
  716 cam_iosched_io_metric_update(struct cam_iosched_softc *isc,
  717     sbintime_t sim_latency, int cmd, size_t size);
  718 #endif
  719 
  720 static inline bool
  721 cam_iosched_has_flagged_work(struct cam_iosched_softc *isc)
  722 {
  723         return !!(isc->flags & CAM_IOSCHED_FLAG_WORK_FLAGS);
  724 }
  725 
  726 static inline bool
  727 cam_iosched_has_io(struct cam_iosched_softc *isc)
  728 {
  729 #ifdef CAM_IOSCHED_DYNAMIC
  730         if (do_dynamic_iosched) {
  731                 struct bio *rbp = bioq_first(&isc->bio_queue);
  732                 struct bio *wbp = bioq_first(&isc->write_queue);
  733                 bool can_write = wbp != NULL &&
  734                     cam_iosched_limiter_caniop(&isc->write_stats, wbp) == 0;
  735                 bool can_read = rbp != NULL &&
  736                     cam_iosched_limiter_caniop(&isc->read_stats, rbp) == 0;
  737                 if (iosched_debug > 2) {
  738                         printf("can write %d: pending_writes %d max_writes %d\n", can_write, isc->write_stats.pending, isc->write_stats.max);
  739                         printf("can read %d: read_stats.pending %d max_reads %d\n", can_read, isc->read_stats.pending, isc->read_stats.max);
  740                         printf("Queued reads %d writes %d\n", isc->read_stats.queued, isc->write_stats.queued);
  741                 }
  742                 return can_read || can_write;
  743         }
  744 #endif
  745         return bioq_first(&isc->bio_queue) != NULL;
  746 }
  747 
  748 static inline bool
  749 cam_iosched_has_more_trim(struct cam_iosched_softc *isc)
  750 {
  751         return !(isc->flags & CAM_IOSCHED_FLAG_TRIM_ACTIVE) &&
  752             bioq_first(&isc->trim_queue);
  753 }
  754 
  755 #define cam_iosched_sort_queue(isc)     ((isc)->sort_io_queue >= 0 ?    \
  756     (isc)->sort_io_queue : cam_sort_io_queues)
  757 
  758 
  759 static inline bool
  760 cam_iosched_has_work(struct cam_iosched_softc *isc)
  761 {
  762 #ifdef CAM_IOSCHED_DYNAMIC
  763         if (iosched_debug > 2)
  764                 printf("has work: %d %d %d\n", cam_iosched_has_io(isc),
  765                     cam_iosched_has_more_trim(isc),
  766                     cam_iosched_has_flagged_work(isc));
  767 #endif
  768 
  769         return cam_iosched_has_io(isc) ||
  770                 cam_iosched_has_more_trim(isc) ||
  771                 cam_iosched_has_flagged_work(isc);
  772 }
  773 
  774 #ifdef CAM_IOSCHED_DYNAMIC
  775 static void
  776 cam_iosched_iop_stats_init(struct cam_iosched_softc *isc, struct iop_stats *ios)
  777 {
  778 
  779         ios->limiter = none;
  780         ios->in = 0;
  781         ios->max = ios->current = 300000;
  782         ios->min = 1;
  783         ios->out = 0;
  784         ios->errs = 0;
  785         ios->pending = 0;
  786         ios->queued = 0;
  787         ios->total = 0;
  788         ios->ema = 0;
  789         ios->emvar = 0;
  790         ios->softc = isc;
  791         cam_iosched_limiter_init(ios);
  792 }
  793 
  794 static int
  795 cam_iosched_limiter_sysctl(SYSCTL_HANDLER_ARGS)
  796 {
  797         char buf[16];
  798         struct iop_stats *ios;
  799         struct cam_iosched_softc *isc;
  800         int value, i, error;
  801         const char *p;
  802 
  803         ios = arg1;
  804         isc = ios->softc;
  805         value = ios->limiter;
  806         if (value < none || value >= limiter_max)
  807                 p = "UNKNOWN";
  808         else
  809                 p = cam_iosched_limiter_names[value];
  810 
  811         strlcpy(buf, p, sizeof(buf));
  812         error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
  813         if (error != 0 || req->newptr == NULL)
  814                 return error;
  815 
  816         cam_periph_lock(isc->periph);
  817 
  818         for (i = none; i < limiter_max; i++) {
  819                 if (strcmp(buf, cam_iosched_limiter_names[i]) != 0)
  820                         continue;
  821                 ios->limiter = i;
  822                 error = cam_iosched_limiter_init(ios);
  823                 if (error != 0) {
  824                         ios->limiter = value;
  825                         cam_periph_unlock(isc->periph);
  826                         return error;
  827                 }
   828                 /* Note: disk load average requires the ticker to always be running */
  829                 callout_reset(&isc->ticker, hz / isc->quanta, cam_iosched_ticker, isc);
  830                 isc->flags |= CAM_IOSCHED_FLAG_CALLOUT_ACTIVE;
  831 
  832                 cam_periph_unlock(isc->periph);
  833                 return 0;
  834         }
  835 
  836         cam_periph_unlock(isc->periph);
  837         return EINVAL;
  838 }
  839 
  840 static int
  841 cam_iosched_control_type_sysctl(SYSCTL_HANDLER_ARGS)
  842 {
  843         char buf[16];
  844         struct control_loop *clp;
  845         struct cam_iosched_softc *isc;
  846         int value, i, error;
  847         const char *p;
  848 
  849         clp = arg1;
  850         isc = clp->softc;
  851         value = clp->type;
   852         if (value < set_max || value >= cl_max)
  853                 p = "UNKNOWN";
  854         else
  855                 p = cam_iosched_control_type_names[value];
  856 
  857         strlcpy(buf, p, sizeof(buf));
  858         error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
  859         if (error != 0 || req->newptr == NULL)
  860                 return error;
  861 
  862         for (i = set_max; i < cl_max; i++) {
  863                 if (strcmp(buf, cam_iosched_control_type_names[i]) != 0)
  864                         continue;
  865                 cam_periph_lock(isc->periph);
  866                 clp->type = i;
  867                 cam_periph_unlock(isc->periph);
  868                 return 0;
  869         }
  870 
  871         return EINVAL;
  872 }
  873 
  874 static int
  875 cam_iosched_sbintime_sysctl(SYSCTL_HANDLER_ARGS)
  876 {
  877         char buf[16];
  878         sbintime_t value;
  879         int error;
  880         uint64_t us;
  881 
  882         value = *(sbintime_t *)arg1;
  883         us = (uint64_t)value / SBT_1US;
   884         snprintf(buf, sizeof(buf), "%ju", (uintmax_t)us);
  885         error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
  886         if (error != 0 || req->newptr == NULL)
  887                 return error;
  888         us = strtoul(buf, NULL, 10);
  889         if (us == 0)
  890                 return EINVAL;
  891         *(sbintime_t *)arg1 = us * SBT_1US;
  892         return 0;
  893 }
  894 
  895 static int
  896 cam_iosched_sysctl_latencies(SYSCTL_HANDLER_ARGS)
  897 {
  898         int i, error;
  899         struct sbuf sb;
  900         uint64_t *latencies;
  901 
  902         latencies = arg1;
  903         sbuf_new_for_sysctl(&sb, NULL, LAT_BUCKETS * 16, req);
  904 
  905         for (i = 0; i < LAT_BUCKETS - 1; i++)
  906                 sbuf_printf(&sb, "%jd,", (intmax_t)latencies[i]);
  907         sbuf_printf(&sb, "%jd", (intmax_t)latencies[LAT_BUCKETS - 1]);
  908         error = sbuf_finish(&sb);
  909         sbuf_delete(&sb);
  910 
  911         return (error);
  912 }
  913 
  914 static int
  915 cam_iosched_quanta_sysctl(SYSCTL_HANDLER_ARGS)
  916 {
  917         int *quanta;
  918         int error, value;
  919 
   920         quanta = arg1;
  921         value = *quanta;
  922 
  923         error = sysctl_handle_int(oidp, (int *)&value, 0, req);
  924         if ((error != 0) || (req->newptr == NULL))
  925                 return (error);
  926 
  927         if (value < 1 || value > hz)
  928                 return (EINVAL);
  929 
  930         *quanta = value;
  931 
  932         return (0);
  933 }
  934 
  935 static void
  936 cam_iosched_iop_stats_sysctl_init(struct cam_iosched_softc *isc, struct iop_stats *ios, char *name)
  937 {
  938         struct sysctl_oid_list *n;
  939         struct sysctl_ctx_list *ctx;
  940 
  941         ios->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx,
  942             SYSCTL_CHILDREN(isc->sysctl_tree), OID_AUTO, name,
  943             CTLFLAG_RD, 0, name);
  944         n = SYSCTL_CHILDREN(ios->sysctl_tree);
  945         ctx = &ios->sysctl_ctx;
  946 
  947         SYSCTL_ADD_UQUAD(ctx, n,
  948             OID_AUTO, "ema", CTLFLAG_RD,
  949             &ios->ema,
  950             "Fast Exponentially Weighted Moving Average");
  951         SYSCTL_ADD_UQUAD(ctx, n,
  952             OID_AUTO, "emvar", CTLFLAG_RD,
  953             &ios->emvar,
  954             "Fast Exponentially Weighted Moving Variance");
  955 
  956         SYSCTL_ADD_INT(ctx, n,
  957             OID_AUTO, "pending", CTLFLAG_RD,
  958             &ios->pending, 0,
  959             "Instantaneous # of pending transactions");
  960         SYSCTL_ADD_INT(ctx, n,
  961             OID_AUTO, "count", CTLFLAG_RD,
  962             &ios->total, 0,
  963             "# of transactions submitted to hardware");
  964         SYSCTL_ADD_INT(ctx, n,
  965             OID_AUTO, "queued", CTLFLAG_RD,
  966             &ios->queued, 0,
  967             "# of transactions in the queue");
  968         SYSCTL_ADD_INT(ctx, n,
  969             OID_AUTO, "in", CTLFLAG_RD,
  970             &ios->in, 0,
  971             "# of transactions queued to driver");
  972         SYSCTL_ADD_INT(ctx, n,
  973             OID_AUTO, "out", CTLFLAG_RD,
  974             &ios->out, 0,
  975             "# of transactions completed (including with error)");
  976         SYSCTL_ADD_INT(ctx, n,
  977             OID_AUTO, "errs", CTLFLAG_RD,
  978             &ios->errs, 0,
  979             "# of transactions completed with an error");
  980 
  981         SYSCTL_ADD_PROC(ctx, n,
  982             OID_AUTO, "limiter", CTLTYPE_STRING | CTLFLAG_RW,
  983             ios, 0, cam_iosched_limiter_sysctl, "A",
  984             "Current limiting type.");
  985         SYSCTL_ADD_INT(ctx, n,
  986             OID_AUTO, "min", CTLFLAG_RW,
  987             &ios->min, 0,
  988             "min resource");
  989         SYSCTL_ADD_INT(ctx, n,
  990             OID_AUTO, "max", CTLFLAG_RW,
  991             &ios->max, 0,
  992             "max resource");
  993         SYSCTL_ADD_INT(ctx, n,
  994             OID_AUTO, "current", CTLFLAG_RW,
  995             &ios->current, 0,
  996             "current resource");
  997 
  998         SYSCTL_ADD_PROC(ctx, n,
  999             OID_AUTO, "latencies", CTLTYPE_STRING | CTLFLAG_RD,
 1000             &ios->latencies, 0,
 1001             cam_iosched_sysctl_latencies, "A",
  1002             "Array of power-of-2 latency buckets from 1ms to 1.024s");
 1003 }
 1004 
 1005 static void
 1006 cam_iosched_iop_stats_fini(struct iop_stats *ios)
 1007 {
 1008         if (ios->sysctl_tree)
 1009                 if (sysctl_ctx_free(&ios->sysctl_ctx) != 0)
 1010                         printf("can't remove iosched sysctl stats context\n");
 1011 }
 1012 
 1013 static void
 1014 cam_iosched_cl_sysctl_init(struct cam_iosched_softc *isc)
 1015 {
 1016         struct sysctl_oid_list *n;
 1017         struct sysctl_ctx_list *ctx;
 1018         struct control_loop *clp;
 1019 
 1020         clp = &isc->cl;
 1021         clp->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx,
 1022             SYSCTL_CHILDREN(isc->sysctl_tree), OID_AUTO, "control",
 1023             CTLFLAG_RD, 0, "Control loop info");
 1024         n = SYSCTL_CHILDREN(clp->sysctl_tree);
 1025         ctx = &clp->sysctl_ctx;
 1026 
 1027         SYSCTL_ADD_PROC(ctx, n,
 1028             OID_AUTO, "type", CTLTYPE_STRING | CTLFLAG_RW,
 1029             clp, 0, cam_iosched_control_type_sysctl, "A",
 1030             "Control loop algorithm");
 1031         SYSCTL_ADD_PROC(ctx, n,
 1032             OID_AUTO, "steer_interval", CTLTYPE_STRING | CTLFLAG_RW,
 1033             &clp->steer_interval, 0, cam_iosched_sbintime_sysctl, "A",
 1034             "How often to steer (in us)");
 1035         SYSCTL_ADD_PROC(ctx, n,
 1036             OID_AUTO, "lolat", CTLTYPE_STRING | CTLFLAG_RW,
 1037             &clp->lolat, 0, cam_iosched_sbintime_sysctl, "A",
 1038             "Low water mark for Latency (in us)");
 1039         SYSCTL_ADD_PROC(ctx, n,
 1040             OID_AUTO, "hilat", CTLTYPE_STRING | CTLFLAG_RW,
 1041             &clp->hilat, 0, cam_iosched_sbintime_sysctl, "A",
 1042             "Hi water mark for Latency (in us)");
 1043         SYSCTL_ADD_INT(ctx, n,
 1044             OID_AUTO, "alpha", CTLFLAG_RW,
 1045             &clp->alpha, 0,
 1046             "Alpha for PLL (x100) aka gain");
 1047 }
 1048 
 1049 static void
 1050 cam_iosched_cl_sysctl_fini(struct control_loop *clp)
 1051 {
 1052         if (clp->sysctl_tree)
 1053                 if (sysctl_ctx_free(&clp->sysctl_ctx) != 0)
 1054                         printf("can't remove iosched sysctl control loop context\n");
 1055 }
 1056 #endif
 1057 
 1058 /*
 1059  * Allocate the iosched structure. This also insulates callers from knowing
 1060  * sizeof struct cam_iosched_softc.
 1061  */
 1062 int
 1063 cam_iosched_init(struct cam_iosched_softc **iscp, struct cam_periph *periph)
 1064 {
 1065 
 1066         *iscp = malloc(sizeof(**iscp), M_CAMSCHED, M_NOWAIT | M_ZERO);
 1067         if (*iscp == NULL)
 1068                 return ENOMEM;
 1069 #ifdef CAM_IOSCHED_DYNAMIC
 1070         if (iosched_debug)
 1071                 printf("CAM IOSCHEDULER Allocating entry at %p\n", *iscp);
 1072 #endif
 1073         (*iscp)->sort_io_queue = -1;
 1074         bioq_init(&(*iscp)->bio_queue);
 1075         bioq_init(&(*iscp)->trim_queue);
 1076 #ifdef CAM_IOSCHED_DYNAMIC
 1077         if (do_dynamic_iosched) {
 1078                 bioq_init(&(*iscp)->write_queue);
 1079                 (*iscp)->read_bias = 100;
 1080                 (*iscp)->current_read_bias = 100;
 1081                 (*iscp)->quanta = min(hz, 200);
 1082                 cam_iosched_iop_stats_init(*iscp, &(*iscp)->read_stats);
 1083                 cam_iosched_iop_stats_init(*iscp, &(*iscp)->write_stats);
 1084                 cam_iosched_iop_stats_init(*iscp, &(*iscp)->trim_stats);
 1085                 (*iscp)->trim_stats.max = 1;    /* Trims are special: one at a time for now */
 1086                 (*iscp)->last_time = sbinuptime();
 1087                 callout_init_mtx(&(*iscp)->ticker, cam_periph_mtx(periph), 0);
 1088                 (*iscp)->periph = periph;
 1089                 cam_iosched_cl_init(&(*iscp)->cl, *iscp);
 1090                 callout_reset(&(*iscp)->ticker, hz / (*iscp)->quanta, cam_iosched_ticker, *iscp);
 1091                 (*iscp)->flags |= CAM_IOSCHED_FLAG_CALLOUT_ACTIVE;
 1092         }
 1093 #endif
 1094 
 1095         return 0;
 1096 }
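
/*
 * Illustrative sketch only (not part of the original source) of how a periph
 * driver is expected to drive this interface; the "xx" routine names and the
 * softc layout are hypothetical:
 *
 *      xxregister():   cam_iosched_init(&softc->cam_iosched, periph);
 *                      cam_iosched_sysctl_init(softc->cam_iosched, ctx, node);
 *      xxstrategy():   cam_iosched_queue_work(softc->cam_iosched, bp);
 *                      cam_iosched_schedule(softc->cam_iosched, periph);
 *      xxstart():      bp = cam_iosched_next_bio(softc->cam_iosched);
 *      xxdone():       cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
 *      xxcleanup():    cam_iosched_fini(softc->cam_iosched);
 */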
 1097 
 1098 /*
 1099  * Reclaim all used resources. This assumes that other folks have
 1100  * drained the requests in the hardware. Maybe an unwise assumption.
 1101  */
 1102 void
 1103 cam_iosched_fini(struct cam_iosched_softc *isc)
 1104 {
 1105         if (isc) {
 1106                 cam_iosched_flush(isc, NULL, ENXIO);
 1107 #ifdef CAM_IOSCHED_DYNAMIC
 1108                 cam_iosched_iop_stats_fini(&isc->read_stats);
 1109                 cam_iosched_iop_stats_fini(&isc->write_stats);
 1110                 cam_iosched_iop_stats_fini(&isc->trim_stats);
 1111                 cam_iosched_cl_sysctl_fini(&isc->cl);
 1112                 if (isc->sysctl_tree)
 1113                         if (sysctl_ctx_free(&isc->sysctl_ctx) != 0)
 1114                                 printf("can't remove iosched sysctl stats context\n");
 1115                 if (isc->flags & CAM_IOSCHED_FLAG_CALLOUT_ACTIVE) {
 1116                         callout_drain(&isc->ticker);
 1117                         isc->flags &= ~ CAM_IOSCHED_FLAG_CALLOUT_ACTIVE;
 1118                 }
 1119 #endif
 1120                 free(isc, M_CAMSCHED);
 1121         }
 1122 }
 1123 
 1124 /*
 1125  * After we're sure we're attaching a device, go ahead and add
 1126  * hooks for any sysctl we may wish to honor.
 1127  */
 1128 void cam_iosched_sysctl_init(struct cam_iosched_softc *isc,
 1129     struct sysctl_ctx_list *ctx, struct sysctl_oid *node)
 1130 {
 1131 #ifdef CAM_IOSCHED_DYNAMIC
 1132         struct sysctl_oid_list *n;
 1133 #endif
 1134 
 1135         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(node),
 1136                 OID_AUTO, "sort_io_queue", CTLFLAG_RW | CTLFLAG_MPSAFE,
 1137                 &isc->sort_io_queue, 0,
 1138                 "Sort IO queue to try and optimise disk access patterns");
 1139 
 1140 #ifdef CAM_IOSCHED_DYNAMIC
 1141         if (!do_dynamic_iosched)
 1142                 return;
 1143 
 1144         isc->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx,
 1145             SYSCTL_CHILDREN(node), OID_AUTO, "iosched",
 1146             CTLFLAG_RD, 0, "I/O scheduler statistics");
 1147         n = SYSCTL_CHILDREN(isc->sysctl_tree);
 1148         ctx = &isc->sysctl_ctx;
 1149 
 1150         cam_iosched_iop_stats_sysctl_init(isc, &isc->read_stats, "read");
 1151         cam_iosched_iop_stats_sysctl_init(isc, &isc->write_stats, "write");
 1152         cam_iosched_iop_stats_sysctl_init(isc, &isc->trim_stats, "trim");
 1153         cam_iosched_cl_sysctl_init(isc);
 1154 
 1155         SYSCTL_ADD_INT(ctx, n,
 1156             OID_AUTO, "read_bias", CTLFLAG_RW,
 1157             &isc->read_bias, 100,
  1158             "How biased towards reads we should be, independent of limits");
 1159 
 1160         SYSCTL_ADD_PROC(ctx, n,
 1161             OID_AUTO, "quanta", CTLTYPE_UINT | CTLFLAG_RW,
 1162             &isc->quanta, 0, cam_iosched_quanta_sysctl, "I",
 1163             "How many quanta per second do we slice the I/O up into");
 1164 
 1165         SYSCTL_ADD_INT(ctx, n,
 1166             OID_AUTO, "total_ticks", CTLFLAG_RD,
 1167             &isc->total_ticks, 0,
 1168             "Total number of ticks we've done");
 1169 
 1170         SYSCTL_ADD_INT(ctx, n,
 1171             OID_AUTO, "load", CTLFLAG_RD,
 1172             &isc->load, 0,
 1173             "scaled load average / 100");
 1174 #endif
 1175 }
 1176 
 1177 /*
 1178  * Flush outstanding I/O. Consumers of this library don't know all the
 1179  * queues we may keep, so this allows all I/O to be flushed in one
 1180  * convenient call.
 1181  */
 1182 void
 1183 cam_iosched_flush(struct cam_iosched_softc *isc, struct devstat *stp, int err)
 1184 {
 1185         bioq_flush(&isc->bio_queue, stp, err);
 1186         bioq_flush(&isc->trim_queue, stp, err);
 1187 #ifdef CAM_IOSCHED_DYNAMIC
 1188         if (do_dynamic_iosched)
 1189                 bioq_flush(&isc->write_queue, stp, err);
 1190 #endif
 1191 }
 1192 
 1193 #ifdef CAM_IOSCHED_DYNAMIC
 1194 static struct bio *
 1195 cam_iosched_get_write(struct cam_iosched_softc *isc)
 1196 {
 1197         struct bio *bp;
 1198 
 1199         /*
 1200          * We control the write rate by controlling how many requests we send
  1201          * down to the drive at any one time. Fewer requests limits the
  1202          * effects of both starvation (when requests take a while) and write
  1203          * amplification (when each request causes more than one write to the
  1204          * NAND media). Limiting the queue depth like this will also limit
  1205          * the write throughput and give reads that want to compete a better
  1206          * chance to compete.
 1207          */
 1208         bp = bioq_first(&isc->write_queue);
 1209         if (bp == NULL) {
 1210                 if (iosched_debug > 3)
 1211                         printf("No writes present in write_queue\n");
 1212                 return NULL;
 1213         }
 1214 
 1215         /*
 1216          * If pending read, prefer that based on current read bias
 1217          * setting.
 1218          */
 1219         if (bioq_first(&isc->bio_queue) && isc->current_read_bias) {
 1220                 if (iosched_debug)
 1221                         printf(
 1222                             "Reads present and current_read_bias is %d queued "
 1223                             "writes %d queued reads %d\n",
 1224                             isc->current_read_bias, isc->write_stats.queued,
 1225                             isc->read_stats.queued);
 1226                 isc->current_read_bias--;
 1227                 /* We're not limiting writes, per se, just doing reads first */
 1228                 return NULL;
 1229         }
 1230 
 1231         /*
 1232          * See if our current limiter allows this I/O.
 1233          */
 1234         if (cam_iosched_limiter_iop(&isc->write_stats, bp) != 0) {
 1235                 if (iosched_debug)
 1236                         printf("Can't write because limiter says no.\n");
 1237                 isc->write_stats.state_flags |= IOP_RATE_LIMITED;
 1238                 return NULL;
 1239         }
 1240 
 1241         /*
 1242          * Let's do this: We've passed all the gates and we're a go
 1243          * to schedule the I/O in the SIM.
 1244          */
 1245         isc->current_read_bias = isc->read_bias;
 1246         bioq_remove(&isc->write_queue, bp);
 1247         if (bp->bio_cmd == BIO_WRITE) {
 1248                 isc->write_stats.queued--;
 1249                 isc->write_stats.total++;
 1250                 isc->write_stats.pending++;
 1251         }
 1252         if (iosched_debug > 9)
 1253                 printf("HWQ : %p %#x\n", bp, bp->bio_cmd);
 1254         isc->write_stats.state_flags &= ~IOP_RATE_LIMITED;
 1255         return bp;
 1256 }
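
/*
 * Illustrative note on the bias accounting above: with the default read_bias
 * of 100, a queued write is passed over in favor of pending reads up to 100
 * times (current_read_bias decrementing each time) before one write is
 * released and the counter is reset to read_bias. The same counter gates
 * TRIMs in cam_iosched_get_trim() below.
 */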
 1257 #endif
 1258 
 1259 /*
 1260  * Put back a trim that you weren't able to actually schedule this time.
 1261  */
 1262 void
 1263 cam_iosched_put_back_trim(struct cam_iosched_softc *isc, struct bio *bp)
 1264 {
 1265         bioq_insert_head(&isc->trim_queue, bp);
 1266 #ifdef CAM_IOSCHED_DYNAMIC
 1267         isc->trim_stats.queued++;
 1268         isc->trim_stats.total--;                /* since we put it back, don't double count */
 1269         isc->trim_stats.pending--;
 1270 #endif
 1271 }
 1272 
 1273 /*
 1274  * gets the next trim from the trim queue.
 1275  *
 1276  * Assumes we're called with the periph lock held.  It removes this
 1277  * trim from the queue and the device must explicitly reinsert it
 1278  * should the need arise.
 1279  */
 1280 struct bio *
 1281 cam_iosched_next_trim(struct cam_iosched_softc *isc)
 1282 {
 1283         struct bio *bp;
 1284 
 1285         bp  = bioq_first(&isc->trim_queue);
 1286         if (bp == NULL)
 1287                 return NULL;
 1288         bioq_remove(&isc->trim_queue, bp);
 1289 #ifdef CAM_IOSCHED_DYNAMIC
 1290         isc->trim_stats.queued--;
 1291         isc->trim_stats.total++;
 1292         isc->trim_stats.pending++;
 1293 #endif
 1294         return bp;
 1295 }
 1296 
 1297 /*
 1298  * gets an available trim from the trim queue, if there's no trim
 1299  * already pending. It removes this trim from the queue and the device
 1300  * must explicitly reinsert it should the need arise.
 1301  *
 1302  * Assumes we're called with the periph lock held.
 1303  */
 1304 struct bio *
 1305 cam_iosched_get_trim(struct cam_iosched_softc *isc)
 1306 {
 1307 
 1308         if (!cam_iosched_has_more_trim(isc))
 1309                 return NULL;
 1310 #ifdef CAM_IOSCHED_DYNAMIC
 1311         if (do_dynamic_iosched) {
 1312                 /*
 1313                  * If pending read, prefer that based on current read bias
 1314                  * setting. The read bias is shared for both writes and
 1315                  * TRIMs, but on TRIMs the bias is for a combined TRIM
 1316                  * not a single TRIM request that's come in.
 1317                  */
 1318                 if (bioq_first(&isc->bio_queue) && isc->current_read_bias) {
 1319                         isc->current_read_bias--;
 1320                         /* We're not limiting TRIMS, per se, just doing reads first */
 1321                         return NULL;
 1322                 }
 1323                 /*
 1324                  * We're going to do a trim, so reset the bias.
 1325                  */
 1326                 isc->current_read_bias = isc->read_bias;
 1327         }
 1328 #endif
 1329         return cam_iosched_next_trim(isc);
 1330 }
 1331 
 1332 /*
 1333  * Determine what the next bit of work to do is for the periph. The
 1334  * default implementation looks to see if we have trims to do, but no
 1335  * trims outstanding. If so, we do that. Otherwise we see if we have
 1336  * other work. If we do, then we do that. Otherwise why were we called?
 1337  */
 1338 struct bio *
 1339 cam_iosched_next_bio(struct cam_iosched_softc *isc)
 1340 {
 1341         struct bio *bp;
 1342 
 1343         /*
 1344          * See if we have a trim that can be scheduled. We can only send one
 1345          * at a time down, so this takes that into account.
 1346          *
 1347          * XXX newer TRIM commands are queueable. Revisit this when we
 1348          * implement them.
 1349          */
 1350         if ((bp = cam_iosched_get_trim(isc)) != NULL)
 1351                 return bp;
 1352 
 1353 #ifdef CAM_IOSCHED_DYNAMIC
 1354         /*
 1355          * See if we have any pending writes, and room in the queue for them,
 1356          * and if so, those are next.
 1357          */
 1358         if (do_dynamic_iosched) {
 1359                 if ((bp = cam_iosched_get_write(isc)) != NULL)
 1360                         return bp;
 1361         }
 1362 #endif
 1363 
 1364         /*
 1365          * next, see if there's other, normal I/O waiting. If so return that.
 1366          */
 1367         if ((bp = bioq_first(&isc->bio_queue)) == NULL)
 1368                 return NULL;
 1369 
 1370 #ifdef CAM_IOSCHED_DYNAMIC
 1371         /*
 1372          * For the dynamic scheduler, bio_queue is only for reads, so enforce
 1373          * the limits here. Enforce only for reads.
 1374          */
 1375         if (do_dynamic_iosched) {
 1376                 if (bp->bio_cmd == BIO_READ &&
 1377                     cam_iosched_limiter_iop(&isc->read_stats, bp) != 0) {
 1378                         isc->read_stats.state_flags |= IOP_RATE_LIMITED;
 1379                         return NULL;
 1380                 }
 1381         }
 1382         isc->read_stats.state_flags &= ~IOP_RATE_LIMITED;
 1383 #endif
 1384         bioq_remove(&isc->bio_queue, bp);
 1385 #ifdef CAM_IOSCHED_DYNAMIC
 1386         if (do_dynamic_iosched) {
 1387                 if (bp->bio_cmd == BIO_READ) {
 1388                         isc->read_stats.queued--;
 1389                         isc->read_stats.total++;
 1390                         isc->read_stats.pending++;
 1391                 } else
 1392                         printf("Found bio_cmd = %#x\n", bp->bio_cmd);
 1393         }
 1394         if (iosched_debug > 9)
 1395                 printf("HWQ : %p %#x\n", bp, bp->bio_cmd);
 1396 #endif
 1397         return bp;
 1398 }
 1399 
 1400 /*
 1401  * Driver has been given some work to do by the block layer. Tell the
 1402  * scheduler about it and have it queue the work up. The scheduler module
 1403  * will then return the currently most useful bit of work later, possibly
 1404  * deferring work for various reasons.
 1405  */
 1406 void
 1407 cam_iosched_queue_work(struct cam_iosched_softc *isc, struct bio *bp)
 1408 {
 1409 
 1410         /*
 1411          * Put all trims on the trim queue in arrival order, since the
 1412          * collapsing code depends on seeing them that way. Otherwise put
 1413          * the work on the bio queue.
 1414          */
 1415         if (bp->bio_cmd == BIO_DELETE) {
 1416                 bioq_insert_tail(&isc->trim_queue, bp);
 1417 #ifdef CAM_IOSCHED_DYNAMIC
 1418                 isc->trim_stats.in++;
 1419                 isc->trim_stats.queued++;
 1420 #endif
 1421         }
 1422 #ifdef CAM_IOSCHED_DYNAMIC
 1423         else if (do_dynamic_iosched && (bp->bio_cmd != BIO_READ)) {
 1424                 if (cam_iosched_sort_queue(isc))
 1425                         bioq_disksort(&isc->write_queue, bp);
 1426                 else
 1427                         bioq_insert_tail(&isc->write_queue, bp);
 1428                 if (iosched_debug > 9)
 1429                         printf("Qw  : %p %#x\n", bp, bp->bio_cmd);
 1430                 if (bp->bio_cmd == BIO_WRITE) {
 1431                         isc->write_stats.in++;
 1432                         isc->write_stats.queued++;
 1433                 }
 1434         }
 1435 #endif
 1436         else {
 1437                 if (cam_iosched_sort_queue(isc))
 1438                         bioq_disksort(&isc->bio_queue, bp);
 1439                 else
 1440                         bioq_insert_tail(&isc->bio_queue, bp);
 1441 #ifdef CAM_IOSCHED_DYNAMIC
 1442                 if (iosched_debug > 9)
 1443                         printf("Qr  : %p %#x\n", bp, bp->bio_cmd);
 1444                 if (bp->bio_cmd == BIO_READ) {
 1445                         isc->read_stats.in++;
 1446                         isc->read_stats.queued++;
 1447                 } else if (bp->bio_cmd == BIO_WRITE) {
 1448                         isc->write_stats.in++;
 1449                         isc->write_stats.queued++;
 1450                 }
 1451 #endif
 1452         }
 1453 }
 1454 
 1455 /*
 1456  * If we have work, get it scheduled. Called with the periph lock held.
 1457  */
 1458 void
 1459 cam_iosched_schedule(struct cam_iosched_softc *isc, struct cam_periph *periph)
 1460 {
 1461 
 1462         if (cam_iosched_has_work(isc))
 1463                 xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 1464 }
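/*
 * Illustrative sketch, not part of cam_iosched.c: the producer side. A
 * periph's strategy-like routine hands new work to the scheduler and then
 * asks it to schedule the periph if anything is runnable. The example_softc
 * layout is the hypothetical one from the sketch above; the cam_iosched_*()
 * and cam_periph_lock()/cam_periph_unlock() calls are real interfaces.
 */
static void
example_periph_strategy(struct example_softc *softc, struct bio *bp)
{

	cam_periph_lock(softc->periph);
	cam_iosched_queue_work(softc->cam_iosched, bp);		/* Queue the bio. */
	cam_iosched_schedule(softc->cam_iosched, softc->periph); /* Kick xpt_schedule() if there is work. */
	cam_periph_unlock(softc->periph);
}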
 1465 
 1466 /*
 1467  * Complete a trim request. Mark that we no longer have one in flight.
 1468  */
 1469 void
 1470 cam_iosched_trim_done(struct cam_iosched_softc *isc)
 1471 {
 1472 
 1473         isc->flags &= ~CAM_IOSCHED_FLAG_TRIM_ACTIVE;
 1474 }
 1475 
 1476 /*
 1477  * Complete a bio. Called before we release the ccb with xpt_release_ccb so we
 1478  * can use the notes stashed in the ccb for statistics.
 1479  */
 1480 int
 1481 cam_iosched_bio_complete(struct cam_iosched_softc *isc, struct bio *bp,
 1482     union ccb *done_ccb)
 1483 {
 1484         int retval = 0;
 1485 #ifdef CAM_IOSCHED_DYNAMIC
 1486         if (!do_dynamic_iosched)
 1487                 return retval;
 1488 
 1489         if (iosched_debug > 10)
 1490                 printf("done: %p %#x\n", bp, bp->bio_cmd);
 1491         if (bp->bio_cmd == BIO_WRITE) {
 1492                 retval = cam_iosched_limiter_iodone(&isc->write_stats, bp);
 1493                 if ((bp->bio_flags & BIO_ERROR) != 0)
 1494                         isc->write_stats.errs++;
 1495                 isc->write_stats.out++;
 1496                 isc->write_stats.pending--;
 1497         } else if (bp->bio_cmd == BIO_READ) {
 1498                 retval = cam_iosched_limiter_iodone(&isc->read_stats, bp);
 1499                 if ((bp->bio_flags & BIO_ERROR) != 0)
 1500                         isc->read_stats.errs++;
 1501                 isc->read_stats.out++;
 1502                 isc->read_stats.pending--;
 1503         } else if (bp->bio_cmd == BIO_DELETE) {
 1504                 if ((bp->bio_flags & BIO_ERROR) != 0)
 1505                         isc->trim_stats.errs++;
 1506                 isc->trim_stats.out++;
 1507                 isc->trim_stats.pending--;
 1508         } else if (bp->bio_cmd != BIO_FLUSH) {
 1509                 if (iosched_debug)
 1510                         printf("Completing command with bio_cmd == %#x\n", bp->bio_cmd);
 1511         }
 1512 
 1513         if (!(bp->bio_flags & BIO_ERROR) && done_ccb != NULL)
 1514                 cam_iosched_io_metric_update(isc,
 1515                     cam_iosched_sbintime_t(done_ccb->ccb_h.qos.periph_data),
 1516                     bp->bio_cmd, bp->bio_bcount);
 1517 #endif
 1518         return retval;
 1519 }
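/*
 * Illustrative sketch, not part of cam_iosched.c: the completion side. A
 * periph's done routine reports the finished bio to the scheduler before
 * releasing the ccb (so the qos timestamp is still available), clears the
 * trim-active state when a delete finishes, and reschedules in case more
 * work is now runnable. Names other than the cam_iosched_*(), biodone(), and
 * xpt_release_ccb() calls are hypothetical.
 */
static void
example_periph_done(struct example_softc *softc, union ccb *done_ccb,
    struct bio *bp)
{

	/* Feed latency and limiter accounting; must precede xpt_release_ccb(). */
	cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);

	if (bp->bio_cmd == BIO_DELETE)
		cam_iosched_trim_done(softc->cam_iosched);	/* Allow the next trim. */

	biodone(bp);
	xpt_release_ccb(done_ccb);

	/* More queued work may be runnable now that this I/O has finished. */
	cam_iosched_schedule(softc->cam_iosched, softc->periph);
}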
 1520 
 1521 /*
 1522  * Tell the I/O scheduler that you've pushed a trim down into the sim.
 1523  * This also tells the I/O scheduler not to push any more trims down, so
 1524  * periphs that can cope with multiple trims in flight may choose not to call it.
 1525  */
 1526 void
 1527 cam_iosched_submit_trim(struct cam_iosched_softc *isc)
 1528 {
 1529 
 1530         isc->flags |= CAM_IOSCHED_FLAG_TRIM_ACTIVE;
 1531 }
 1532 
 1533 /*
 1534  * Change the sorting policy hint for I/O transactions on this device.
 1535  */
 1536 void
 1537 cam_iosched_set_sort_queue(struct cam_iosched_softc *isc, int val)
 1538 {
 1539 
 1540         isc->sort_io_queue = val;
 1541 }
 1542 
 1543 int
 1544 cam_iosched_has_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
 1545 {
 1546         return isc->flags & flags;
 1547 }
 1548 
 1549 void
 1550 cam_iosched_set_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
 1551 {
 1552         isc->flags |= flags;
 1553 }
 1554 
 1555 void
 1556 cam_iosched_clr_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
 1557 {
 1558         isc->flags &= ~flags;
 1559 }
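/*
 * Illustrative sketch, not part of cam_iosched.c: the work-flag helpers above
 * let a periph park driver-specific deferred work in the scheduler's flag
 * word so that cam_iosched_has_work() sees it. EXAMPLE_WORK_FOO and
 * example_flag_usage are hypothetical names a real driver would replace.
 */
#define	EXAMPLE_WORK_FOO	0x10000		/* hypothetical periph-private flag */

static void
example_flag_usage(struct cam_iosched_softc *isc)
{

	cam_iosched_set_work_flags(isc, EXAMPLE_WORK_FOO);	/* Note deferred work. */
	if (cam_iosched_has_work_flags(isc, EXAMPLE_WORK_FOO))
		cam_iosched_clr_work_flags(isc, EXAMPLE_WORK_FOO); /* ...and consume it. */
}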
 1560 
 1561 #ifdef CAM_IOSCHED_DYNAMIC
 1562 /*
 1563  * After the method presented in Jack Crenshaw's 1998 article "Integer
 1564  * Square Roots," reprinted at
 1565  * http://www.embedded.com/electronics-blogs/programmer-s-toolbox/4219659/Integer-Square-Roots
 1566  * and well worth the read. Briefly, we find the largest power of 4 that
 1567  * is no greater than val, then work down through each smaller power of 4,
 1568  * subtracting where the remaining value is large enough. The right shifts
 1569  * at each step divide the partial result by 2, which after successive
 1570  * application winds up accumulating the right answer. It could also have
 1571  * been accumulated using a separate root counter, but this code is smaller
 1572  * and faster than that method, and it is integer-size invariant.
 1573  * It returns floor(sqrt(val)), that is, the largest integer less than
 1574  * or equal to the square root.
 1575  */
 1576 static uint64_t
 1577 isqrt64(uint64_t val)
 1578 {
 1579         uint64_t res = 0;
 1580         uint64_t bit = 1ULL << (sizeof(uint64_t) * NBBY - 2);
 1581 
 1582         /*
 1583          * Find the largest power of 4 no greater than val.
 1584          */
 1585         while (bit > val)
 1586                 bit >>= 2;
 1587 
 1588         /*
 1589          * Accumulate the answer, one bit at a time (we keep moving
 1590          * them over since 2 is the square root of 4 and we test
 1591          * powers of 4). We accumulate where we find the bit, but
 1592          * the successive shifts land the bit in the right place
 1593          * by the end.
 1594          */
 1595         while (bit != 0) {
 1596                 if (val >= res + bit) {
 1597                         val -= res + bit;
 1598                         res = (res >> 1) + bit;
 1599                 } else
 1600                         res >>= 1;
 1601                 bit >>= 2;
 1602         }
 1603 
 1604         return res;
 1605 }
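/*
 * Illustrative sketch, not part of cam_iosched.c: a few spot checks of the
 * floor-of-square-root behaviour described above. example_isqrt64_checks is
 * a hypothetical self-test; KASSERT() is the standard kernel assertion.
 */
static void
example_isqrt64_checks(void)
{

	KASSERT(isqrt64(0) == 0, ("isqrt64(0) must be 0"));
	KASSERT(isqrt64(15) == 3, ("floor(sqrt(15)) is 3"));
	KASSERT(isqrt64(16) == 4, ("16 is a perfect square"));
	KASSERT(isqrt64(1ULL << 62) == (1ULL << 31), ("large power of four"));
}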
 1606 
 1607 static sbintime_t latencies[LAT_BUCKETS - 1] = {
 1608         SBT_1MS <<  0,
 1609         SBT_1MS <<  1,
 1610         SBT_1MS <<  2,
 1611         SBT_1MS <<  3,
 1612         SBT_1MS <<  4,
 1613         SBT_1MS <<  5,
 1614         SBT_1MS <<  6,
 1615         SBT_1MS <<  7,
 1616         SBT_1MS <<  8,
 1617         SBT_1MS <<  9,
 1618         SBT_1MS << 10,
 1619         SBT_1MS << 11,
 1620         SBT_1MS << 12,
 1621         SBT_1MS << 13           /* 8.192s */
 1622 };
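/*
 * Illustrative note, not part of cam_iosched.c: bucket i of the histogram
 * filled in below counts completions faster than SBT_1MS << i that were not
 * already counted by an earlier bucket, so a 5ms completion lands in bucket 3
 * (the "< 8ms" bucket), and anything taking 8.192s or longer goes into the
 * final overflow bucket.
 */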
 1623 
 1624 static void
 1625 cam_iosched_update(struct iop_stats *iop, sbintime_t sim_latency)
 1626 {
 1627         sbintime_t y, deltasq, delta;
 1628         int i;
 1629 
 1630         /*
 1631          * Keep latency counts in power-of-two buckets. This helps us spot
 1632          * outlier behavior that averages would otherwise obscure.
 1633          */
 1634         for (i = 0; i < LAT_BUCKETS - 1; i++) {
 1635                 if (sim_latency < latencies[i]) {
 1636                         iop->latencies[i]++;
 1637                         break;
 1638                 }
 1639         }
 1640         if (i == LAT_BUCKETS - 1)
 1641                 iop->latencies[i]++;     /* Put all values of 8.192s or more into the last bucket. */
 1642 
 1643         /*
 1644          * Classic exponentially decaying average with a tiny alpha
 1645          * (2 ^ -alpha_bits). For more info see the NIST statistical
 1646          * handbook.
 1647          *
 1648          * ema_t = y_t * alpha + ema_t-1 * (1 - alpha)          [nist]
 1649          * ema_t = y_t * alpha + ema_t-1 - alpha * ema_t-1
 1650          * ema_t = alpha * y_t - alpha * ema_t-1 + ema_t-1
 1651          * alpha = 1 / (1 << alpha_bits)
 1652          * sub e == ema_t-1, b == 1/alpha (== 1 << alpha_bits), d == y_t - ema_t-1
 1653          *      = y_t/b - e/b + be/b
 1654          *      = (y_t - e + be) / b
 1655          *      = (be + d) / b
 1656          *
 1657          * Since alpha is a power of two, we can compute this w/o any mult or
 1658          * division.
 1659          *
 1660          * Variance can also be computed. Usually, it would be expressed as follows:
 1661          *      delta_t = y_t - ema_t-1
 1662          *      emvar_t = (1 - alpha) * (emvar_t-1 + delta_t^2 * alpha)
 1663          *        = emvar_t-1 - alpha * emvar_t-1 + delta_t^2 * alpha - (delta_t * alpha)^2
 1664          * sub b == 1/alpha (== 1 << alpha_bits), e == emvar_t-1, d == delta_t (so dd == delta_t^2)
 1665          *        = e - e/b + dd/b + dd/bb
 1666          *        = (bbe - be + bdd + dd) / bb
 1667          *        = (bbe + b(dd-e) + dd) / bb (which is expanded below bb = 1<<(2*alpha_bits))
 1668          */
 1669         /*
 1670          * XXX possible numeric issues
 1671          *      o We assume right shifts of signed integers do the right thing, since
 1672          *        that's implementation defined. You can change the right shifts to / (1LL << alpha_bits).
 1673          *      o alpha_bits = 9 gives a ceiling of 23 bits of seconds for ema and 14 bits
 1674          *        for emvar. This puts a ceiling of 13 bits on alpha_bits since we need a
 1675          *        few tens of seconds of representation.
 1676          *      o We mitigate alpha issues by never setting it too high.
 1677          */
 1678         y = sim_latency;
 1679         delta = (y - iop->ema);                                 /* d */
 1680         iop->ema = ((iop->ema << alpha_bits) + delta) >> alpha_bits;
 1681 
 1682         /*
 1683          * Were we to naively plow ahead at this point, we would wind up with many
 1684          * numerical issues making any SD > ~3ms unreliable. So, we shift right by 12.
 1685          * This leaves us with microsecond level precision in the input, and so the
 1686          * same in the output. It means we can't overflow deltasq unless delta > 4k
 1687          * seconds. It also means that emvar can be up to 46 bits, 40 of which are
 1688          * fraction, which gives us a way to measure up to ~8s in the SD before the
 1689          * computation goes unstable. Even the worst hard disk rarely has > 1s service
 1690          * time in the drive. It does mean we have to shift left 12 bits after taking
 1691          * the square root to compute the actual standard deviation estimate. This loss
 1692          * of precision is preferable to needing int128 types to work. The above numbers
 1693          * assume alpha_bits = 9; 10 or 11 are OK, but we start to run into issues at 12,
 1694          * and 12 or 13 is OK only for the EMA, while EMVAR and SD will be wrong in those cases.
 1695          */
 1696         delta >>= 12;
 1697         deltasq = delta * delta;                                /* dd */
 1698         iop->emvar = ((iop->emvar << (2 * alpha_bits)) +        /* bbe */
 1699             ((deltasq - iop->emvar) << alpha_bits) +            /* b(dd-e) */
 1700             deltasq)                                            /* dd */
 1701             >> (2 * alpha_bits);                                /* div bb */
 1702         iop->sd = (sbintime_t)isqrt64((uint64_t)iop->emvar) << 12;
 1703 }
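/*
 * Illustrative sketch, not part of cam_iosched.c: one step of the fixed-point
 * EMA update above in isolation, assuming alpha_bits = 9 (alpha == 1/512) as
 * the comments above do. Starting from an average of 2ms and a new 10ms
 * sample, the average rises by (10ms - 2ms) / 512, roughly 15.6us. The
 * function name and its standalone form are hypothetical.
 */
static sbintime_t
example_ema_step(sbintime_t ema, sbintime_t sample)
{
	const int example_alpha_bits = 9;	/* mirrors the default alpha_bits */
	sbintime_t delta;

	delta = sample - ema;							/* d */
	return (((ema << example_alpha_bits) + delta) >> example_alpha_bits);	/* (be + d) / b */
}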
 1704 
 1705 static void
 1706 cam_iosched_io_metric_update(struct cam_iosched_softc *isc,
 1707     sbintime_t sim_latency, int cmd, size_t size)
 1708 {
 1709         /* XXX Do we need to scale based on the size of the I/O? */
 1710         switch (cmd) {
 1711         case BIO_READ:
 1712                 cam_iosched_update(&isc->read_stats, sim_latency);
 1713                 break;
 1714         case BIO_WRITE:
 1715                 cam_iosched_update(&isc->write_stats, sim_latency);
 1716                 break;
 1717         case BIO_DELETE:
 1718                 cam_iosched_update(&isc->trim_stats, sim_latency);
 1719                 break;
 1720         default:
 1721                 break;
 1722         }
 1723 }
 1724 
 1725 #ifdef DDB
 1726 static int biolen(struct bio_queue_head *bq)
 1727 {
 1728         int i = 0;
 1729         struct bio *bp;
 1730 
 1731         TAILQ_FOREACH(bp, &bq->queue, bio_queue) {
 1732                 i++;
 1733         }
 1734         return i;
 1735 }
 1736 
 1737 /*
 1738  * Show the internal state of the I/O scheduler.
 1739  */
 1740 DB_SHOW_COMMAND(iosched, cam_iosched_db_show)
 1741 {
 1742         struct cam_iosched_softc *isc;
 1743 
 1744         if (!have_addr) {
 1745                 db_printf("Need addr\n");
 1746                 return;
 1747         }
 1748         isc = (struct cam_iosched_softc *)addr;
 1749         db_printf("pending_reads:     %d\n", isc->read_stats.pending);
 1750         db_printf("min_reads:         %d\n", isc->read_stats.min);
 1751         db_printf("max_reads:         %d\n", isc->read_stats.max);
 1752         db_printf("reads:             %d\n", isc->read_stats.total);
 1753         db_printf("in_reads:          %d\n", isc->read_stats.in);
 1754         db_printf("out_reads:         %d\n", isc->read_stats.out);
 1755         db_printf("queued_reads:      %d\n", isc->read_stats.queued);
 1756         db_printf("Current Q len      %d\n", biolen(&isc->bio_queue));
 1757         db_printf("pending_writes:    %d\n", isc->write_stats.pending);
 1758         db_printf("min_writes:        %d\n", isc->write_stats.min);
 1759         db_printf("max_writes:        %d\n", isc->write_stats.max);
 1760         db_printf("writes:            %d\n", isc->write_stats.total);
 1761         db_printf("in_writes:         %d\n", isc->write_stats.in);
 1762         db_printf("out_writes:        %d\n", isc->write_stats.out);
 1763         db_printf("queued_writes:     %d\n", isc->write_stats.queued);
 1764         db_printf("Current Q len      %d\n", biolen(&isc->write_queue));
 1765         db_printf("pending_trims:     %d\n", isc->trim_stats.pending);
 1766         db_printf("min_trims:         %d\n", isc->trim_stats.min);
 1767         db_printf("max_trims:         %d\n", isc->trim_stats.max);
 1768         db_printf("trims:             %d\n", isc->trim_stats.total);
 1769         db_printf("in_trims:          %d\n", isc->trim_stats.in);
 1770         db_printf("out_trims:         %d\n", isc->trim_stats.out);
 1771         db_printf("queued_trims:      %d\n", isc->trim_stats.queued);
 1772         db_printf("Current Q len      %d\n", biolen(&isc->trim_queue));
 1773         db_printf("read_bias:         %d\n", isc->read_bias);
 1774         db_printf("current_read_bias: %d\n", isc->current_read_bias);
 1775         db_printf("Trim active?       %s\n",
 1776             (isc->flags & CAM_IOSCHED_FLAG_TRIM_ACTIVE) ? "yes" : "no");
 1777 }
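/*
 * Illustrative usage note, not part of cam_iosched.c: with a kernel built
 * with both DDB and CAM_IOSCHED_DYNAMIC, the command above is invoked from
 * the debugger prompt as "show iosched <addr>", where <addr> is the
 * cam_iosched_softc pointer of the device of interest.
 */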
 1778 #endif
 1779 #endif
