sys/cam/cam_iosched.c
1 /*-
2 * CAM IO Scheduler Interface
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 *
6 * Copyright (c) 2015 Netflix, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 #include "opt_cam.h"
33 #include "opt_ddb.h"
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/bio.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mutex.h>
46 #include <sys/sbuf.h>
47 #include <sys/sysctl.h>
48
49 #include <cam/cam.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_periph.h>
52 #include <cam/cam_xpt_periph.h>
53 #include <cam/cam_xpt_internal.h>
54 #include <cam/cam_iosched.h>
55
56 #include <ddb/ddb.h>
57
58 static MALLOC_DEFINE(M_CAMSCHED, "CAM I/O Scheduler",
59 "CAM I/O Scheduler buffers");
60
61 static SYSCTL_NODE(_kern_cam, OID_AUTO, iosched, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
62 "CAM I/O Scheduler parameters");
63
64 /*
65 * Default I/O scheduler for FreeBSD. This implementation is just a thin veneer
66 * over the bioq_* interface, with notions of separate calls for normal I/O and
67 * for trims.
68 *
69 * When CAM_IOSCHED_DYNAMIC is defined, the scheduler is enhanced to dynamically
70 * steer the rate of one type of traffic to help other types of traffic (e.g.,
71 * limit writes when read latency deteriorates on SSDs).
72 */
73
74 #ifdef CAM_IOSCHED_DYNAMIC
75
76 static bool do_dynamic_iosched = true;
77 SYSCTL_BOOL(_kern_cam_iosched, OID_AUTO, dynamic, CTLFLAG_RD | CTLFLAG_TUN,
78 &do_dynamic_iosched, 1,
79 "Enable Dynamic I/O scheduler optimizations.");
80
81 /*
82 * For an EMA, with an alpha of alpha, we know
83 * alpha = 2 / (N + 1)
84 * or
85 * N = 1 + (2 / alpha)
86 * where N is the number of samples that 86% of the current
87 * EMA is derived from.
88 *
89 * So we invent[*] alpha_bits:
90 * alpha_bits = -log_2(alpha)
91 * alpha = 2^-alpha_bits
92 * So
93 * N = 1 + 2^(alpha_bits + 1)
94 *
95 * The default of 9 gives a lookback of 1025 samples for 86% of the data.
96 * For a brief intro: https://en.wikipedia.org/wiki/Moving_average
97 *
98 * [*] Stolen from the load average code and many other places.
99 * Note: See computation of EMA and EMVAR for acceptable ranges of alpha.
100 */
101 static int alpha_bits = 9;
102 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, alpha_bits, CTLFLAG_RW | CTLFLAG_TUN,
103 &alpha_bits, 1,
104 "Bits in EMA's alpha.");
105
106 /*
107 * Different parameters for the buckets of latency we keep track of. These are all
108 * published read-only since at present they are compile time constants.
109 *
110 * Bucket base is the upper bound of the first latency bucket. It's currently 20us.
111 * With 20 buckets (see below), that leads to a geometric progression with a max size
112 * of 5.2s, which is safely larger than 1s to help diagnose extreme outliers better.
113 */
114 #ifndef BUCKET_BASE
115 #define BUCKET_BASE ((SBT_1S / 50000) + 1) /* 20us */
116 #endif
117 static sbintime_t bucket_base = BUCKET_BASE;
118 SYSCTL_SBINTIME_USEC(_kern_cam_iosched, OID_AUTO, bucket_base_us, CTLFLAG_RD,
119 &bucket_base,
120 "Size of the smallest latency bucket");
121
122 /*
123 * Bucket ratio is the geometric progression for the bucket. For a bucket b_n
124 * the size of bucket b_n+1 is b_n * bucket_ratio / 100.
125 */
126 static int bucket_ratio = 200; /* Rather hard coded at the moment */
127 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, bucket_ratio, CTLFLAG_RD,
128 &bucket_ratio, 200,
129 "Latency Bucket Ratio for geometric progression.");
130
131 /*
132 * Number of total buckets. Starting at BUCKET_BASE, each one is a power of 2.
133 */
134 #ifndef LAT_BUCKETS
135 #define LAT_BUCKETS 20 /* < 20us < 40us ... < 2^(n-1)*20us >= 2^(n-1)*20us */
136 #endif
137 static int lat_buckets = LAT_BUCKETS;
138 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, buckets, CTLFLAG_RD,
139 &lat_buckets, LAT_BUCKETS,
140 "Total number of latency buckets published");
141
142 /*
143 * Read bias: how many reads do we favor before scheduling a write
144 * when we have a choice.
145 */
146 static int default_read_bias = 0;
147 SYSCTL_INT(_kern_cam_iosched, OID_AUTO, read_bias, CTLFLAG_RWTUN,
148 &default_read_bias, 0,
149 "Default read bias for new devices.");
150
151 struct iop_stats;
152 struct cam_iosched_softc;
153
154 int iosched_debug = 0;
155
156 typedef enum {
157 none = 0, /* No limits */
158 queue_depth, /* Limit how many ops we queue to SIM */
159 iops, /* Limit # of IOPS to the drive */
160 bandwidth, /* Limit bandwidth to the drive */
161 limiter_max
162 } io_limiter;
163
164 static const char *cam_iosched_limiter_names[] =
165 { "none", "queue_depth", "iops", "bandwidth" };
166
167 /*
168 * Called to initialize the bits of the iop_stats structure relevant to the
169 * limiter. Called just after the limiter is set.
170 */
171 typedef int l_init_t(struct iop_stats *);
172
173 /*
174 * Called every tick.
175 */
176 typedef int l_tick_t(struct iop_stats *);
177
178 /*
179 * Called to see if the limiter thinks this IOP can be allowed to
180 * proceed. If so, the limiter assumes that the IOP proceeded
181 * and makes any accounting of it that's needed.
182 */
183 typedef int l_iop_t(struct iop_stats *, struct bio *);
184
185 /*
186 * Called when an I/O completes so the limiter can update its
187 * accounting. Pending I/Os may complete in any order (even when
188 * sent to the hardware at the same time), so the limiter may not
189 * make any assumptions other than this I/O has completed. If it
190 * returns 1, then xpt_schedule() needs to be called again.
191 */
192 typedef int l_iodone_t(struct iop_stats *, struct bio *);
193
194 static l_iop_t cam_iosched_qd_iop;
195 static l_iop_t cam_iosched_qd_caniop;
196 static l_iodone_t cam_iosched_qd_iodone;
197
198 static l_init_t cam_iosched_iops_init;
199 static l_tick_t cam_iosched_iops_tick;
200 static l_iop_t cam_iosched_iops_caniop;
201 static l_iop_t cam_iosched_iops_iop;
202
203 static l_init_t cam_iosched_bw_init;
204 static l_tick_t cam_iosched_bw_tick;
205 static l_iop_t cam_iosched_bw_caniop;
206 static l_iop_t cam_iosched_bw_iop;
207
208 struct limswitch {
209 l_init_t *l_init;
210 l_tick_t *l_tick;
211 l_iop_t *l_iop;
212 l_iop_t *l_caniop;
213 l_iodone_t *l_iodone;
214 } limsw[] =
215 {
216 { /* none */
217 .l_init = NULL,
218 .l_tick = NULL,
219 .l_iop = NULL,
220 .l_iodone= NULL,
221 },
222 { /* queue_depth */
223 .l_init = NULL,
224 .l_tick = NULL,
225 .l_caniop = cam_iosched_qd_caniop,
226 .l_iop = cam_iosched_qd_iop,
227 .l_iodone= cam_iosched_qd_iodone,
228 },
229 { /* iops */
230 .l_init = cam_iosched_iops_init,
231 .l_tick = cam_iosched_iops_tick,
232 .l_caniop = cam_iosched_iops_caniop,
233 .l_iop = cam_iosched_iops_iop,
234 .l_iodone= NULL,
235 },
236 { /* bandwidth */
237 .l_init = cam_iosched_bw_init,
238 .l_tick = cam_iosched_bw_tick,
239 .l_caniop = cam_iosched_bw_caniop,
240 .l_iop = cam_iosched_bw_iop,
241 .l_iodone= NULL,
242 },
243 };
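
/*
 * Adding a limiter is mechanical (a sketch; the mylim names are
 * hypothetical): add an enum value before limiter_max, a string to
 * cam_iosched_limiter_names[], and a row here, e.g.
 *
 *	{ .l_init = cam_iosched_mylim_init,
 *	  .l_tick = NULL,
 *	  .l_caniop = cam_iosched_mylim_caniop,
 *	  .l_iop = cam_iosched_mylim_iop,
 *	  .l_iodone = NULL, },
 *
 * The cam_iosched_limiter_* dispatch helpers below need no changes.
 */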
244
245 struct iop_stats {
246 /*
247 * sysctl state for this subnode.
248 */
249 struct sysctl_ctx_list sysctl_ctx;
250 struct sysctl_oid *sysctl_tree;
251
252 /*
253 * Information about the current rate limiters, if any
254 */
255 io_limiter limiter; /* How are I/Os being limited */
256 int min; /* Low range of limit */
257 int max; /* High range of limit */
258 int current; /* Current rate limiter */
259 int l_value1; /* per-limiter scratch value 1. */
260 int l_value2; /* per-limiter scratch value 2. */
261
262 /*
263 * Debug information about counts of I/Os that have gone through the
264 * scheduler.
265 */
266 int pending; /* I/Os pending in the hardware */
267 int queued; /* number currently in the queue */
268 int total; /* Total for all time -- wraps */
269 int in; /* number queued all time -- wraps */
270 int out; /* number completed all time -- wraps */
271 int errs; /* Number of I/Os completed with error -- wraps */
272
273 /*
274 * Statistics on different bits of the process.
275 */
276 /* Exp Moving Average, see alpha_bits for more details */
277 sbintime_t ema;
278 sbintime_t emvar;
279 sbintime_t sd; /* Last computed sd */
280
281 uint32_t state_flags;
282 #define IOP_RATE_LIMITED 1u
283
284 uint64_t latencies[LAT_BUCKETS];
285
286 struct cam_iosched_softc *softc;
287 };
288
289 typedef enum {
290 set_max = 0, /* current = max */
291 read_latency, /* Steer read latency by throttling writes */
292 cl_max /* Keep last */
293 } control_type;
294
295 static const char *cam_iosched_control_type_names[] =
296 { "set_max", "read_latency" };
297
298 struct control_loop {
299 /*
300 * sysctl state for this subnode.
301 */
302 struct sysctl_ctx_list sysctl_ctx;
303 struct sysctl_oid *sysctl_tree;
304
305 sbintime_t next_steer; /* Time of next steer */
306 sbintime_t steer_interval; /* How often do we steer? */
307 sbintime_t lolat;
308 sbintime_t hilat;
309 int alpha;
310 control_type type; /* What type of control? */
311 int last_count; /* Last I/O count */
312
313 struct cam_iosched_softc *softc;
314 };
315
316 #endif
317
318 struct cam_iosched_softc {
319 struct bio_queue_head bio_queue;
320 struct bio_queue_head trim_queue;
321 /* scheduler flags < 16, user flags >= 16 */
322 uint32_t flags;
323 int sort_io_queue;
324 int trim_goal; /* # of trims to queue before sending */
325 int trim_ticks; /* Max ticks to hold trims */
326 int last_trim_tick; /* Last 'tick' time we queued a trim */
327 int queued_trims; /* Number of trims in the queue */
328 #ifdef CAM_IOSCHED_DYNAMIC
329 int read_bias; /* Read bias setting */
330 int current_read_bias; /* Current read bias state */
331 int total_ticks;
332 int load; /* EMA of 'load average' of disk / 2^16 */
333
334 struct bio_queue_head write_queue;
335 struct iop_stats read_stats, write_stats, trim_stats;
336 struct sysctl_ctx_list sysctl_ctx;
337 struct sysctl_oid *sysctl_tree;
338
339 int quanta; /* Number of quanta per second */
340 struct callout ticker; /* Callout for our quota system */
341 struct cam_periph *periph; /* cam periph associated with this device */
342 uint32_t this_frac; /* Fraction of a second (65536ths) for this tick */
343 sbintime_t last_time; /* Last time we ticked */
344 struct control_loop cl;
345 sbintime_t max_lat; /* when != 0, if iop latency > max_lat, call max_lat_fcn */
346 cam_iosched_latfcn_t latfcn;
347 void *latarg;
348 #endif
349 };
350
351 #ifdef CAM_IOSCHED_DYNAMIC
352 /*
353 * helper functions to call the limsw functions.
354 */
355 static int
356 cam_iosched_limiter_init(struct iop_stats *ios)
357 {
358 int lim = ios->limiter;
359
360 /* maybe this should be a kassert */
361 if (lim < none || lim >= limiter_max)
362 return EINVAL;
363
364 if (limsw[lim].l_init)
365 return limsw[lim].l_init(ios);
366
367 return 0;
368 }
369
370 static int
371 cam_iosched_limiter_tick(struct iop_stats *ios)
372 {
373 int lim = ios->limiter;
374
375 /* maybe this should be a kassert */
376 if (lim < none || lim >= limiter_max)
377 return EINVAL;
378
379 if (limsw[lim].l_tick)
380 return limsw[lim].l_tick(ios);
381
382 return 0;
383 }
384
385 static int
386 cam_iosched_limiter_iop(struct iop_stats *ios, struct bio *bp)
387 {
388 int lim = ios->limiter;
389
390 /* maybe this should be a kassert */
391 if (lim < none || lim >= limiter_max)
392 return EINVAL;
393
394 if (limsw[lim].l_iop)
395 return limsw[lim].l_iop(ios, bp);
396
397 return 0;
398 }
399
400 static int
401 cam_iosched_limiter_caniop(struct iop_stats *ios, struct bio *bp)
402 {
403 int lim = ios->limiter;
404
405 /* maybe this should be a kassert */
406 if (lim < none || lim >= limiter_max)
407 return EINVAL;
408
409 if (limsw[lim].l_caniop)
410 return limsw[lim].l_caniop(ios, bp);
411
412 return 0;
413 }
414
415 static int
416 cam_iosched_limiter_iodone(struct iop_stats *ios, struct bio *bp)
417 {
418 int lim = ios->limiter;
419
420 /* maybe this should be a kassert */
421 if (lim < none || lim >= limiter_max)
422 return 0;
423
424 if (limsw[lim].l_iodone)
425 return limsw[lim].l_iodone(ios, bp);
426
427 return 0;
428 }
429
430 /*
431 * Functions to implement the different kinds of limiters
432 */
433
434 static int
435 cam_iosched_qd_iop(struct iop_stats *ios, struct bio *bp)
436 {
437
438 if (ios->current <= 0 || ios->pending < ios->current)
439 return 0;
440
441 return EAGAIN;
442 }
443
444 static int
445 cam_iosched_qd_caniop(struct iop_stats *ios, struct bio *bp)
446 {
447
448 if (ios->current <= 0 || ios->pending < ios->current)
449 return 0;
450
451 return EAGAIN;
452 }
453
454 static int
455 cam_iosched_qd_iodone(struct iop_stats *ios, struct bio *bp)
456 {
457
458 if (ios->current <= 0 || ios->pending != ios->current)
459 return 0;
460
461 return 1;
462 }
463
464 static int
465 cam_iosched_iops_init(struct iop_stats *ios)
466 {
467
468 ios->l_value1 = ios->current / ios->softc->quanta;
469 if (ios->l_value1 <= 0)
470 ios->l_value1 = 1;
471 ios->l_value2 = 0;
472
473 return 0;
474 }
475
476 static int
477 cam_iosched_iops_tick(struct iop_stats *ios)
478 {
479 int new_ios;
480
481 /*
482 * Allow at least one IO per tick until all
483 * the IOs for this interval have been spent.
484 */
485 new_ios = (int)((ios->current * (uint64_t)ios->softc->this_frac) >> 16);
486 if (new_ios < 1 && ios->l_value2 < ios->current) {
487 new_ios = 1;
488 ios->l_value2++;
489 }
490
491 /*
492 * If this is a new accounting interval, discard any "unspent" ios
493 * granted in the previous interval. Otherwise add the new ios to
494 * the previously granted ones that haven't been spent yet.
495 */
496 if ((ios->softc->total_ticks % ios->softc->quanta) == 0) {
497 ios->l_value1 = new_ios;
498 ios->l_value2 = 1;
499 } else {
500 ios->l_value1 += new_ios;
501 }
502
503 return 0;
504 }
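
/*
 * Worked example of the tick math above (illustrative numbers): with
 * current = 100 IOPS and quanta = 200 ticks/s, this_frac is about
 * 65536 / 200 = 327, so new_ios = (100 * 327) >> 16 = 0 every tick; the
 * "at least one" clause then grants 1 I/O per tick until l_value2 reaches
 * 100, spending the second's budget over its first 100 ticks.
 */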
505
506 static int
507 cam_iosched_iops_caniop(struct iop_stats *ios, struct bio *bp)
508 {
509
510 /*
511 * So if we have any more IOPs left, allow it,
512 * otherwise wait. If current iops is 0, treat that
513 * as unlimited as a failsafe.
514 */
515 if (ios->current > 0 && ios->l_value1 <= 0)
516 return EAGAIN;
517 return 0;
518 }
519
520 static int
521 cam_iosched_iops_iop(struct iop_stats *ios, struct bio *bp)
522 {
523 int rv;
524
525 rv = cam_iosched_limiter_caniop(ios, bp);
526 if (rv == 0)
527 ios->l_value1--;
528
529 return rv;
530 }
531
532 static int
533 cam_iosched_bw_init(struct iop_stats *ios)
534 {
535
536 /* ios->current is in kB/s, so scale to bytes */
537 ios->l_value1 = ios->current * 1000 / ios->softc->quanta;
538
539 return 0;
540 }
541
542 static int
543 cam_iosched_bw_tick(struct iop_stats *ios)
544 {
545 int bw;
546
547 /*
548 * If we're in the hole for available quota from
549 * the last time, then add the quantum for this.
550 * If we have any left over from last quantum,
551 * then too bad, that's lost. Also, ios->current
552 * is in kB/s, so scale.
553 *
554 * We also allow up to 4 quanta of credits to
555 * accumulate to deal with burstiness. 4 is extremely
556 * arbitrary.
557 */
558 bw = (int)((ios->current * 1000ull * (uint64_t)ios->softc->this_frac) >> 16);
559 if (ios->l_value1 < bw * 4)
560 ios->l_value1 += bw;
561
562 return 0;
563 }
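
/*
 * Worked example (illustrative numbers): with current = 100000 kB/s and
 * quanta = 200, each tick adds bw = (100000 * 1000 * 327) >> 16, or about
 * 500000 bytes of quota, and unused credit can accumulate up to roughly
 * 4 * bw = 2000000 bytes to absorb bursts.
 */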
564
565 static int
566 cam_iosched_bw_caniop(struct iop_stats *ios, struct bio *bp)
567 {
568 /*
569 * So if we have any more bw quota left, allow it,
570 * otherwise wait. Note, we'll go negative and that's
571 * OK. We'll just get a little less next quota.
572 *
573 * Note on going negative: that allows us to process
574 * requests in order better, since we won't allow
575 * shorter reads to get around the long one that we
576 * don't have the quota to do just yet. It also prevents
577 * starvation by being a little more permissive about
578 * what we let through this quantum (to prevent the
579 * starvation), at the cost of getting a little less
580 * next quantum.
581 *
582 * Also note that if the current limit is <= 0,
583 * we treat it as unlimited as a failsafe.
584 */
585 if (ios->current > 0 && ios->l_value1 <= 0)
586 return EAGAIN;
587
588 return 0;
589 }
590
591 static int
592 cam_iosched_bw_iop(struct iop_stats *ios, struct bio *bp)
593 {
594 int rv;
595
596 rv = cam_iosched_limiter_caniop(ios, bp);
597 if (rv == 0)
598 ios->l_value1 -= bp->bio_length;
599
600 return rv;
601 }
602
603 static void cam_iosched_cl_maybe_steer(struct control_loop *clp);
604
605 static void
606 cam_iosched_ticker(void *arg)
607 {
608 struct cam_iosched_softc *isc = arg;
609 sbintime_t now, delta;
610 int pending;
611
612 callout_reset(&isc->ticker, hz / isc->quanta, cam_iosched_ticker, isc);
613
614 now = sbinuptime();
615 delta = now - isc->last_time;
616 isc->this_frac = (uint32_t)delta >> 16; /* Note: discards seconds; they should be 0, and it's harmless if not */
617 isc->last_time = now;
618
619 cam_iosched_cl_maybe_steer(&isc->cl);
620
621 cam_iosched_limiter_tick(&isc->read_stats);
622 cam_iosched_limiter_tick(&isc->write_stats);
623 cam_iosched_limiter_tick(&isc->trim_stats);
624
625 cam_iosched_schedule(isc, isc->periph);
626
627 /*
628 * isc->load is an EMA of the pending I/Os at each tick. The number of
629 * pending I/Os is the sum of the I/Os queued to the hardware, and those
630 * in the software queue that could be queued to the hardware if there
631 * were slots.
632 *
633 * ios_stats.pending is a count of requests in the SIM right now for
634 * each of these types of I/O. So the total pending count is the sum of
635 * these I/Os and the sum of the queued I/Os still in the software queue
636 * for those operations that aren't being rate limited at the moment.
637 *
638 * The reason for the rate limiting bit is because those I/Os
639 * aren't part of the software queued load (since we could
640 * give them to hardware, but choose not to).
641 *
642 * Note: due to a bug in counting pending TRIM in the device, we
643 * don't include them in this count. We count each BIO_DELETE in
644 * the pending count, but the periph drivers collapse them down
645 * into one TRIM command. That one trim command gets the completion
646 * so the counts get off.
647 */
648 pending = isc->read_stats.pending + isc->write_stats.pending /* + isc->trim_stats.pending */;
649 pending += !!(isc->read_stats.state_flags & IOP_RATE_LIMITED) * isc->read_stats.queued +
650 !!(isc->write_stats.state_flags & IOP_RATE_LIMITED) * isc->write_stats.queued /* +
651 !!(isc->trim_stats.state_flags & IOP_RATE_LIMITED) * isc->trim_stats.queued */ ;
652 pending <<= 16;
653 pending /= isc->periph->path->device->ccbq.total_openings;
654
655 isc->load = (pending + (isc->load << 13) - isc->load) >> 13; /* see above: 13 -> N = 16385 ticks / 200/s = ~82s, ~1 minute */
656
657 isc->total_ticks++;
658 }
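
/*
 * Worked example of the load EMA above: the update is an EMA with
 * alpha = 2^-13 over the queue depth expressed as a 16.16 fixed-point
 * fraction of the device's openings. With pending steady at half the
 * openings, isc->load converges to 0x8000 (0.5). By the N formula above,
 * N = 1 + 2^14 = 16385 ticks; at 200 ticks/s that is the ~82s window.
 */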
659
660 static void
661 cam_iosched_cl_init(struct control_loop *clp, struct cam_iosched_softc *isc)
662 {
663
664 clp->next_steer = sbinuptime();
665 clp->softc = isc;
666 clp->steer_interval = SBT_1S * 5; /* Let's start out steering every 5s */
667 clp->lolat = 5 * SBT_1MS;
668 clp->hilat = 15 * SBT_1MS;
669 clp->alpha = 20; /* Alpha == gain. 20 = .2 */
670 clp->type = set_max;
671 }
672
673 static void
674 cam_iosched_cl_maybe_steer(struct control_loop *clp)
675 {
676 struct cam_iosched_softc *isc;
677 sbintime_t now, lat;
678 int old;
679
680 isc = clp->softc;
681 now = isc->last_time;
682 if (now < clp->next_steer)
683 return;
684
685 clp->next_steer = now + clp->steer_interval;
686 switch (clp->type) {
687 case set_max:
688 if (isc->write_stats.current != isc->write_stats.max)
689 printf("Steering write from %d kBps to %d kBps\n",
690 isc->write_stats.current, isc->write_stats.max);
691 isc->read_stats.current = isc->read_stats.max;
692 isc->write_stats.current = isc->write_stats.max;
693 isc->trim_stats.current = isc->trim_stats.max;
694 break;
695 case read_latency:
696 old = isc->write_stats.current;
697 lat = isc->read_stats.ema;
698 /*
699 * Simple PLL-like engine. Since we're steering to a range for
700 * the SP (set point), things are a little more
701 * complicated. In addition, we're not directly controlling our
702 * PV (process variable), the read latency, but instead are
703 * manipulating the write bandwidth limit as our MV
704 * (manipulated variable), so analysis of this code gets a bit
705 * messy. Also, the MV is a very noisy control surface for read
706 * latency since it is affected by many hidden processes inside
707 * the device which change how responsive read latency will be
708 * in reaction to changes in write bandwidth. Unlike the classic
709 * boiler control PLL, this may result in over-steering while
710 * the SSD takes its time to react to the new, lower load. This
711 * is why we use a relatively low alpha of between .1 and .25 to
712 * compensate for this effect. At .1, it takes ~22 steering
713 * intervals to back off by a factor of 10. At .2 it only takes
714 * ~10. At .25 it only takes ~8. However, some preliminary data
715 * from the SSD drives suggests a response time in 10's of
716 * seconds before latency drops regardless of the new write
717 * rate. Careful observation will be required to tune this
718 * effectively.
719 *
720 * Also, when there's no read traffic, we jack up the write
721 * limit too regardless of the last read latency. 10 is
722 * somewhat arbitrary.
723 */
724 if (lat < clp->lolat || isc->read_stats.total - clp->last_count < 10)
725 isc->write_stats.current = isc->write_stats.current *
726 (100 + clp->alpha) / 100; /* Scale up */
727 else if (lat > clp->hilat)
728 isc->write_stats.current = isc->write_stats.current *
729 (100 - clp->alpha) / 100; /* Scale down */
730 clp->last_count = isc->read_stats.total;
731
732 /*
733 * Even if we don't steer, per se, enforce the min/max limits as
734 * those may have changed.
735 */
736 if (isc->write_stats.current < isc->write_stats.min)
737 isc->write_stats.current = isc->write_stats.min;
738 if (isc->write_stats.current > isc->write_stats.max)
739 isc->write_stats.current = isc->write_stats.max;
740 if (old != isc->write_stats.current && iosched_debug)
741 printf("Steering write from %d kBps to %d kBps due to latency of %jdus\n",
742 old, isc->write_stats.current,
743 (uintmax_t)((uint64_t)1000000 * (uint32_t)lat) >> 32);
744 break;
745 case cl_max:
746 break;
747 }
748 }
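
/*
 * Worked example of the gain (a sketch): with the default alpha of 20 the
 * write limit is multiplied by 1.20 when read latency is under lolat and
 * by 0.80 when it is over hilat; 0.8^10 ~= 0.11, which is the "~10
 * steering intervals to back off by a factor of 10" cited above.
 */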
749 #endif
750
751 /*
752 * Trim or similar currently pending completion. Should only be set for
753 * those drivers wishing only one Trim active at a time.
754 */
755 #define CAM_IOSCHED_FLAG_TRIM_ACTIVE (1ul << 0)
756 /* Callout active, and needs to be torn down */
757 #define CAM_IOSCHED_FLAG_CALLOUT_ACTIVE (1ul << 1)
758
759 /* Periph drivers set these flags to indicate work */
760 #define CAM_IOSCHED_FLAG_WORK_FLAGS ((0xffffu) << 16)
761
762 #ifdef CAM_IOSCHED_DYNAMIC
763 static void
764 cam_iosched_io_metric_update(struct cam_iosched_softc *isc,
765 sbintime_t sim_latency, int cmd, size_t size);
766 #endif
767
768 static inline bool
769 cam_iosched_has_flagged_work(struct cam_iosched_softc *isc)
770 {
771 return !!(isc->flags & CAM_IOSCHED_FLAG_WORK_FLAGS);
772 }
773
774 static inline bool
775 cam_iosched_has_io(struct cam_iosched_softc *isc)
776 {
777 #ifdef CAM_IOSCHED_DYNAMIC
778 if (do_dynamic_iosched) {
779 struct bio *rbp = bioq_first(&isc->bio_queue);
780 struct bio *wbp = bioq_first(&isc->write_queue);
781 bool can_write = wbp != NULL &&
782 cam_iosched_limiter_caniop(&isc->write_stats, wbp) == 0;
783 bool can_read = rbp != NULL &&
784 cam_iosched_limiter_caniop(&isc->read_stats, rbp) == 0;
785 if (iosched_debug > 2) {
786 printf("can write %d: pending_writes %d max_writes %d\n", can_write, isc->write_stats.pending, isc->write_stats.max);
787 printf("can read %d: read_stats.pending %d max_reads %d\n", can_read, isc->read_stats.pending, isc->read_stats.max);
788 printf("Queued reads %d writes %d\n", isc->read_stats.queued, isc->write_stats.queued);
789 }
790 return can_read || can_write;
791 }
792 #endif
793 return bioq_first(&isc->bio_queue) != NULL;
794 }
795
796 static inline bool
797 cam_iosched_has_more_trim(struct cam_iosched_softc *isc)
798 {
799 struct bio *bp;
800
801 bp = bioq_first(&isc->trim_queue);
802 #ifdef CAM_IOSCHED_DYNAMIC
803 if (do_dynamic_iosched) {
804 /*
805 * If we're limiting trims, then defer action on trims
806 * for a bit.
807 */
808 if (bp == NULL || cam_iosched_limiter_caniop(&isc->trim_stats, bp) != 0)
809 return false;
810 }
811 #endif
812
813 /*
814 * If we've set a trim_goal, then if we exceed that allow trims
815 * to be passed back to the driver. If we've also set a tick timeout
816 * allow trims back to the driver. Otherwise, don't allow trims yet.
817 */
818 if (isc->trim_goal > 0) {
819 if (isc->queued_trims >= isc->trim_goal)
820 return true;
821 if (isc->queued_trims > 0 &&
822 isc->trim_ticks > 0 &&
823 ticks - isc->last_trim_tick > isc->trim_ticks)
824 return true;
825 return false;
826 }
827
828 /* NB: Should perhaps have a max trim active independent of I/O limiters */
829 return !(isc->flags & CAM_IOSCHED_FLAG_TRIM_ACTIVE) && bp != NULL;
830 }
831
832 #define cam_iosched_sort_queue(isc) ((isc)->sort_io_queue >= 0 ? \
833 (isc)->sort_io_queue : cam_sort_io_queues)
834
835 static inline bool
836 cam_iosched_has_work(struct cam_iosched_softc *isc)
837 {
838 #ifdef CAM_IOSCHED_DYNAMIC
839 if (iosched_debug > 2)
840 printf("has work: %d %d %d\n", cam_iosched_has_io(isc),
841 cam_iosched_has_more_trim(isc),
842 cam_iosched_has_flagged_work(isc));
843 #endif
844
845 return cam_iosched_has_io(isc) ||
846 cam_iosched_has_more_trim(isc) ||
847 cam_iosched_has_flagged_work(isc);
848 }
849
850 #ifdef CAM_IOSCHED_DYNAMIC
851 static void
852 cam_iosched_iop_stats_init(struct cam_iosched_softc *isc, struct iop_stats *ios)
853 {
854
855 ios->limiter = none;
856 ios->in = 0;
857 ios->max = ios->current = 300000;
858 ios->min = 1;
859 ios->out = 0;
860 ios->errs = 0;
861 ios->pending = 0;
862 ios->queued = 0;
863 ios->total = 0;
864 ios->ema = 0;
865 ios->emvar = 0;
866 ios->softc = isc;
867 cam_iosched_limiter_init(ios);
868 }
869
870 static int
871 cam_iosched_limiter_sysctl(SYSCTL_HANDLER_ARGS)
872 {
873 char buf[16];
874 struct iop_stats *ios;
875 struct cam_iosched_softc *isc;
876 int value, i, error;
877 const char *p;
878
879 ios = arg1;
880 isc = ios->softc;
881 value = ios->limiter;
882 if (value < none || value >= limiter_max)
883 p = "UNKNOWN";
884 else
885 p = cam_iosched_limiter_names[value];
886
887 strlcpy(buf, p, sizeof(buf));
888 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
889 if (error != 0 || req->newptr == NULL)
890 return error;
891
892 cam_periph_lock(isc->periph);
893
894 for (i = none; i < limiter_max; i++) {
895 if (strcmp(buf, cam_iosched_limiter_names[i]) != 0)
896 continue;
897 ios->limiter = i;
898 error = cam_iosched_limiter_init(ios);
899 if (error != 0) {
900 ios->limiter = value;
901 cam_periph_unlock(isc->periph);
902 return error;
903 }
904 /* Note: disk load average requires ticker to be always running */
905 callout_reset(&isc->ticker, hz / isc->quanta, cam_iosched_ticker, isc);
906 isc->flags |= CAM_IOSCHED_FLAG_CALLOUT_ACTIVE;
907
908 cam_periph_unlock(isc->periph);
909 return 0;
910 }
911
912 cam_periph_unlock(isc->periph);
913 return EINVAL;
914 }
915
916 static int
917 cam_iosched_control_type_sysctl(SYSCTL_HANDLER_ARGS)
918 {
919 char buf[16];
920 struct control_loop *clp;
921 struct cam_iosched_softc *isc;
922 int value, i, error;
923 const char *p;
924
925 clp = arg1;
926 isc = clp->softc;
927 value = clp->type;
928 if (value < set_max || value >= cl_max)
929 p = "UNKNOWN";
930 else
931 p = cam_iosched_control_type_names[value];
932
933 strlcpy(buf, p, sizeof(buf));
934 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
935 if (error != 0 || req->newptr == NULL)
936 return error;
937
938 for (i = set_max; i < cl_max; i++) {
939 if (strcmp(buf, cam_iosched_control_type_names[i]) != 0)
940 continue;
941 cam_periph_lock(isc->periph);
942 clp->type = i;
943 cam_periph_unlock(isc->periph);
944 return 0;
945 }
946
947 return EINVAL;
948 }
949
950 static int
951 cam_iosched_sbintime_sysctl(SYSCTL_HANDLER_ARGS)
952 {
953 char buf[16];
954 sbintime_t value;
955 int error;
956 uint64_t us;
957
958 value = *(sbintime_t *)arg1;
959 us = (uint64_t)value / SBT_1US;
960 snprintf(buf, sizeof(buf), "%ju", (uintmax_t)us);
961 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
962 if (error != 0 || req->newptr == NULL)
963 return error;
964 us = strtoul(buf, NULL, 10);
965 if (us == 0)
966 return EINVAL;
967 *(sbintime_t *)arg1 = us * SBT_1US;
968 return 0;
969 }
970
971 static int
972 cam_iosched_sysctl_latencies(SYSCTL_HANDLER_ARGS)
973 {
974 int i, error;
975 struct sbuf sb;
976 uint64_t *latencies;
977
978 latencies = arg1;
979 sbuf_new_for_sysctl(&sb, NULL, LAT_BUCKETS * 16, req);
980
981 for (i = 0; i < LAT_BUCKETS - 1; i++)
982 sbuf_printf(&sb, "%jd,", (intmax_t)latencies[i]);
983 sbuf_printf(&sb, "%jd", (intmax_t)latencies[LAT_BUCKETS - 1]);
984 error = sbuf_finish(&sb);
985 sbuf_delete(&sb);
986
987 return (error);
988 }
989
990 static int
991 cam_iosched_quanta_sysctl(SYSCTL_HANDLER_ARGS)
992 {
993 int *quanta;
994 int error, value;
995
996 quanta = (int *)arg1;
997 value = *quanta;
998
999 error = sysctl_handle_int(oidp, &value, 0, req);
1000 if ((error != 0) || (req->newptr == NULL))
1001 return (error);
1002
1003 if (value < 1 || value > hz)
1004 return (EINVAL);
1005
1006 *quanta = value;
1007
1008 return (0);
1009 }
1010
1011 static void
1012 cam_iosched_iop_stats_sysctl_init(struct cam_iosched_softc *isc, struct iop_stats *ios, char *name)
1013 {
1014 struct sysctl_oid_list *n;
1015 struct sysctl_ctx_list *ctx;
1016
1017 ios->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx,
1018 SYSCTL_CHILDREN(isc->sysctl_tree), OID_AUTO, name,
1019 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, name);
1020 n = SYSCTL_CHILDREN(ios->sysctl_tree);
1021 ctx = &ios->sysctl_ctx;
1022
1023 SYSCTL_ADD_UQUAD(ctx, n,
1024 OID_AUTO, "ema", CTLFLAG_RD,
1025 &ios->ema,
1026 "Fast Exponentially Weighted Moving Average");
1027 SYSCTL_ADD_UQUAD(ctx, n,
1028 OID_AUTO, "emvar", CTLFLAG_RD,
1029 &ios->emvar,
1030 "Fast Exponentially Weighted Moving Variance");
1031
1032 SYSCTL_ADD_INT(ctx, n,
1033 OID_AUTO, "pending", CTLFLAG_RD,
1034 &ios->pending, 0,
1035 "Instantaneous # of pending transactions");
1036 SYSCTL_ADD_INT(ctx, n,
1037 OID_AUTO, "count", CTLFLAG_RD,
1038 &ios->total, 0,
1039 "# of transactions submitted to hardware");
1040 SYSCTL_ADD_INT(ctx, n,
1041 OID_AUTO, "queued", CTLFLAG_RD,
1042 &ios->queued, 0,
1043 "# of transactions in the queue");
1044 SYSCTL_ADD_INT(ctx, n,
1045 OID_AUTO, "in", CTLFLAG_RD,
1046 &ios->in, 0,
1047 "# of transactions queued to driver");
1048 SYSCTL_ADD_INT(ctx, n,
1049 OID_AUTO, "out", CTLFLAG_RD,
1050 &ios->out, 0,
1051 "# of transactions completed (including with error)");
1052 SYSCTL_ADD_INT(ctx, n,
1053 OID_AUTO, "errs", CTLFLAG_RD,
1054 &ios->errs, 0,
1055 "# of transactions completed with an error");
1056
1057 SYSCTL_ADD_PROC(ctx, n,
1058 OID_AUTO, "limiter",
1059 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1060 ios, 0, cam_iosched_limiter_sysctl, "A",
1061 "Current limiting type.");
1062 SYSCTL_ADD_INT(ctx, n,
1063 OID_AUTO, "min", CTLFLAG_RW,
1064 &ios->min, 0,
1065 "min resource");
1066 SYSCTL_ADD_INT(ctx, n,
1067 OID_AUTO, "max", CTLFLAG_RW,
1068 &ios->max, 0,
1069 "max resource");
1070 SYSCTL_ADD_INT(ctx, n,
1071 OID_AUTO, "current", CTLFLAG_RW,
1072 &ios->current, 0,
1073 "current resource");
1074
1075 SYSCTL_ADD_PROC(ctx, n,
1076 OID_AUTO, "latencies",
1077 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
1078 &ios->latencies, 0,
1079 cam_iosched_sysctl_latencies, "A",
1080 "Array of power of 2 latency from 1ms to 1.024s");
1081 }
1082
1083 static void
1084 cam_iosched_iop_stats_fini(struct iop_stats *ios)
1085 {
1086 if (ios->sysctl_tree)
1087 if (sysctl_ctx_free(&ios->sysctl_ctx) != 0)
1088 printf("can't remove iosched sysctl stats context\n");
1089 }
1090
1091 static void
1092 cam_iosched_cl_sysctl_init(struct cam_iosched_softc *isc)
1093 {
1094 struct sysctl_oid_list *n;
1095 struct sysctl_ctx_list *ctx;
1096 struct control_loop *clp;
1097
1098 clp = &isc->cl;
1099 clp->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx,
1100 SYSCTL_CHILDREN(isc->sysctl_tree), OID_AUTO, "control",
1101 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Control loop info");
1102 n = SYSCTL_CHILDREN(clp->sysctl_tree);
1103 ctx = &clp->sysctl_ctx;
1104
1105 SYSCTL_ADD_PROC(ctx, n,
1106 OID_AUTO, "type",
1107 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1108 clp, 0, cam_iosched_control_type_sysctl, "A",
1109 "Control loop algorithm");
1110 SYSCTL_ADD_PROC(ctx, n,
1111 OID_AUTO, "steer_interval",
1112 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1113 &clp->steer_interval, 0, cam_iosched_sbintime_sysctl, "A",
1114 "How often to steer (in us)");
1115 SYSCTL_ADD_PROC(ctx, n,
1116 OID_AUTO, "lolat",
1117 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1118 &clp->lolat, 0, cam_iosched_sbintime_sysctl, "A",
1119 "Low water mark for Latency (in us)");
1120 SYSCTL_ADD_PROC(ctx, n,
1121 OID_AUTO, "hilat",
1122 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
1123 &clp->hilat, 0, cam_iosched_sbintime_sysctl, "A",
1124 "Hi water mark for Latency (in us)");
1125 SYSCTL_ADD_INT(ctx, n,
1126 OID_AUTO, "alpha", CTLFLAG_RW,
1127 &clp->alpha, 0,
1128 "Alpha for PLL (x100) aka gain");
1129 }
1130
1131 static void
1132 cam_iosched_cl_sysctl_fini(struct control_loop *clp)
1133 {
1134 if (clp->sysctl_tree)
1135 if (sysctl_ctx_free(&clp->sysctl_ctx) != 0)
1136 printf("can't remove iosched sysctl control loop context\n");
1137 }
1138 #endif
1139
1140 /*
1141 * Allocate the iosched structure. This also insulates callers from knowing
1142 * sizeof struct cam_iosched_softc.
1143 */
1144 int
1145 cam_iosched_init(struct cam_iosched_softc **iscp, struct cam_periph *periph)
1146 {
1147
1148 *iscp = malloc(sizeof(**iscp), M_CAMSCHED, M_NOWAIT | M_ZERO);
1149 if (*iscp == NULL)
1150 return ENOMEM;
1151 #ifdef CAM_IOSCHED_DYNAMIC
1152 if (iosched_debug)
1153 printf("CAM IOSCHEDULER Allocating entry at %p\n", *iscp);
1154 #endif
1155 (*iscp)->sort_io_queue = -1;
1156 bioq_init(&(*iscp)->bio_queue);
1157 bioq_init(&(*iscp)->trim_queue);
1158 #ifdef CAM_IOSCHED_DYNAMIC
1159 if (do_dynamic_iosched) {
1160 bioq_init(&(*iscp)->write_queue);
1161 (*iscp)->read_bias = default_read_bias;
1162 (*iscp)->current_read_bias = 0;
1163 (*iscp)->quanta = min(hz, 200);
1164 cam_iosched_iop_stats_init(*iscp, &(*iscp)->read_stats);
1165 cam_iosched_iop_stats_init(*iscp, &(*iscp)->write_stats);
1166 cam_iosched_iop_stats_init(*iscp, &(*iscp)->trim_stats);
1167 (*iscp)->trim_stats.max = 1; /* Trims are special: one at a time for now */
1168 (*iscp)->last_time = sbinuptime();
1169 callout_init_mtx(&(*iscp)->ticker, cam_periph_mtx(periph), 0);
1170 (*iscp)->periph = periph;
1171 cam_iosched_cl_init(&(*iscp)->cl, *iscp);
1172 callout_reset(&(*iscp)->ticker, hz / (*iscp)->quanta, cam_iosched_ticker, *iscp);
1173 (*iscp)->flags |= CAM_IOSCHED_FLAG_CALLOUT_ACTIVE;
1174 }
1175 #endif
1176
1177 return 0;
1178 }
1179
1180 /*
1181 * Reclaim all used resources. This assumes that other folks have
1182 * drained the requests in the hardware. Maybe an unwise assumption.
1183 */
1184 void
1185 cam_iosched_fini(struct cam_iosched_softc *isc)
1186 {
1187 if (isc) {
1188 cam_iosched_flush(isc, NULL, ENXIO);
1189 #ifdef CAM_IOSCHED_DYNAMIC
1190 cam_iosched_iop_stats_fini(&isc->read_stats);
1191 cam_iosched_iop_stats_fini(&isc->write_stats);
1192 cam_iosched_iop_stats_fini(&isc->trim_stats);
1193 cam_iosched_cl_sysctl_fini(&isc->cl);
1194 if (isc->sysctl_tree)
1195 if (sysctl_ctx_free(&isc->sysctl_ctx) != 0)
1196 printf("can't remove iosched sysctl stats context\n");
1197 if (isc->flags & CAM_IOSCHED_FLAG_CALLOUT_ACTIVE) {
1198 callout_drain(&isc->ticker);
1199 isc->flags &= ~ CAM_IOSCHED_FLAG_CALLOUT_ACTIVE;
1200 }
1201 #endif
1202 free(isc, M_CAMSCHED);
1203 }
1204 }
1205
1206 /*
1207 * After we're sure we're attaching a device, go ahead and add
1208 * hooks for any sysctl we may wish to honor.
1209 */
1210 void cam_iosched_sysctl_init(struct cam_iosched_softc *isc,
1211 struct sysctl_ctx_list *ctx, struct sysctl_oid *node)
1212 {
1213 struct sysctl_oid_list *n;
1214
1215 n = SYSCTL_CHILDREN(node);
1216 SYSCTL_ADD_INT(ctx, n,
1217 OID_AUTO, "sort_io_queue", CTLFLAG_RW | CTLFLAG_MPSAFE,
1218 &isc->sort_io_queue, 0,
1219 "Sort IO queue to try and optimise disk access patterns");
1220 SYSCTL_ADD_INT(ctx, n,
1221 OID_AUTO, "trim_goal", CTLFLAG_RW,
1222 &isc->trim_goal, 0,
1223 "Number of trims to try to accumulate before sending to hardware");
1224 SYSCTL_ADD_INT(ctx, n,
1225 OID_AUTO, "trim_ticks", CTLFLAG_RW,
1226 &isc->trim_ticks, 0,
1227 "Number of ticks to hold back trims while accumulating");
1228
1229 #ifdef CAM_IOSCHED_DYNAMIC
1230 if (!do_dynamic_iosched)
1231 return;
1232
1233 isc->sysctl_tree = SYSCTL_ADD_NODE(&isc->sysctl_ctx,
1234 SYSCTL_CHILDREN(node), OID_AUTO, "iosched",
1235 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "I/O scheduler statistics");
1236 n = SYSCTL_CHILDREN(isc->sysctl_tree);
1237 ctx = &isc->sysctl_ctx;
1238
1239 cam_iosched_iop_stats_sysctl_init(isc, &isc->read_stats, "read");
1240 cam_iosched_iop_stats_sysctl_init(isc, &isc->write_stats, "write");
1241 cam_iosched_iop_stats_sysctl_init(isc, &isc->trim_stats, "trim");
1242 cam_iosched_cl_sysctl_init(isc);
1243
1244 SYSCTL_ADD_INT(ctx, n,
1245 OID_AUTO, "read_bias", CTLFLAG_RW,
1246 &isc->read_bias, default_read_bias,
1247 "How biased towards read should we be independent of limits");
1248
1249 SYSCTL_ADD_PROC(ctx, n,
1250 OID_AUTO, "quanta", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
1251 &isc->quanta, 0, cam_iosched_quanta_sysctl, "I",
1252 "How many quanta per second do we slice the I/O up into");
1253
1254 SYSCTL_ADD_INT(ctx, n,
1255 OID_AUTO, "total_ticks", CTLFLAG_RD,
1256 &isc->total_ticks, 0,
1257 "Total number of ticks we've done");
1258
1259 SYSCTL_ADD_INT(ctx, n,
1260 OID_AUTO, "load", CTLFLAG_RD,
1261 &isc->load, 0,
1262 "scaled load average / 100");
1263
1264 SYSCTL_ADD_U64(ctx, n,
1265 OID_AUTO, "latency_trigger", CTLFLAG_RW,
1266 &isc->max_lat, 0,
1267 "Latency treshold to trigger callbacks");
1268 #endif
1269 }
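
/*
 * Example of driving these knobs from userland (a sketch; the exact node
 * depends on the attaching periph, e.g. a hypothetical da0 device would
 * publish them under kern.cam.da.0.iosched):
 *
 *	sysctl kern.cam.da.0.iosched.write.limiter=iops
 *	sysctl kern.cam.da.0.iosched.write.max=10000
 *	sysctl kern.cam.da.0.iosched.read.latencies
 */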
1270
1271 void
1272 cam_iosched_set_latfcn(struct cam_iosched_softc *isc,
1273 cam_iosched_latfcn_t fnp, void *argp)
1274 {
1275 #ifdef CAM_IOSCHED_DYNAMIC
1276 isc->latfcn = fnp;
1277 isc->latarg = argp;
1278 #endif
1279 }
1280
1281 /*
1282 * Client drivers can set two parameters. "goal" is the number of BIO_DELETEs
1283 * that will be queued up before iosched will "release" the trims to the client
1284 * driver to do with what they will (usually combine as many as possible). If we
1285 * don't get this many, after trim_ticks we'll submit the I/O anyway with
1286 * whatever we have. We do need an I/O of some kind to clock the deferred
1287 * trims out to disk. Since we will eventually get a write for the super block
1288 * or something before we shut down, the trims will complete. To be safe, when a
1289 * BIO_FLUSH is presented to the iosched work queue, we set the ticks time far
1290 * enough in the past so we'll present the BIO_DELETEs to the client driver.
1291 * There might be a race if no BIO_DELETEs were queued, a BIO_FLUSH comes in
1292 * and then a BIO_DELETE is sent down. No known client does this, and there's
1293 * already a race between an ordered BIO_FLUSH and any BIO_DELETEs in flight,
1294 * but no client depends on the ordering being honored.
1295 *
1296 * XXX I'm not sure what the interaction between UFS direct BIOs and the BUF
1297 * flushing on shutdown is. I think there are bufs that would be dependent on the BIO
1298 * finishing to write out at least metadata, so we'll be fine. To be safe, keep
1299 * the number of ticks low (less than maybe 10s) to avoid shutdown races.
1300 */
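
/*
 * A typical use from an attaching periph might be (a sketch; the values
 * and the softc field name are illustrative only):
 *
 *	cam_iosched_set_trim_goal(softc->cam_iosched, 32);
 *	cam_iosched_set_trim_ticks(softc->cam_iosched, 5 * hz);
 *
 * i.e. accumulate up to 32 BIO_DELETEs, but hold them back no longer than
 * about five seconds, well under the ~10s shutdown caution above.
 */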
1301
1302 void
1303 cam_iosched_set_trim_goal(struct cam_iosched_softc *isc, int goal)
1304 {
1305
1306 isc->trim_goal = goal;
1307 }
1308
1309 void
1310 cam_iosched_set_trim_ticks(struct cam_iosched_softc *isc, int trim_ticks)
1311 {
1312
1313 isc->trim_ticks = trim_ticks;
1314 }
1315
1316 /*
1317 * Flush outstanding I/O. Consumers of this library don't know all the
1318 * queues we may keep, so this allows all I/O to be flushed in one
1319 * convenient call.
1320 */
1321 void
1322 cam_iosched_flush(struct cam_iosched_softc *isc, struct devstat *stp, int err)
1323 {
1324 bioq_flush(&isc->bio_queue, stp, err);
1325 bioq_flush(&isc->trim_queue, stp, err);
1326 #ifdef CAM_IOSCHED_DYNAMIC
1327 if (do_dynamic_iosched)
1328 bioq_flush(&isc->write_queue, stp, err);
1329 #endif
1330 }
1331
1332 #ifdef CAM_IOSCHED_DYNAMIC
1333 static struct bio *
1334 cam_iosched_get_write(struct cam_iosched_softc *isc)
1335 {
1336 struct bio *bp;
1337
1338 /*
1339 * We control the write rate by controlling how many requests we send
1340 * down to the drive at any one time. Fewer requests limits the
1341 * effects of both starvation when the requests take a while and write
1342 * amplification when each request is causing more than one write to
1343 * the NAND media. Limiting the queue depth like this will also limit
1344 * the write throughput and give and reads that want to compete to
1345 * compete unfairly.
1346 */
1347 bp = bioq_first(&isc->write_queue);
1348 if (bp == NULL) {
1349 if (iosched_debug > 3)
1350 printf("No writes present in write_queue\n");
1351 return NULL;
1352 }
1353
1354 /*
1355 * If pending read, prefer that based on current read bias
1356 * setting.
1357 */
1358 if (bioq_first(&isc->bio_queue) && isc->current_read_bias) {
1359 if (iosched_debug)
1360 printf(
1361 "Reads present and current_read_bias is %d queued "
1362 "writes %d queued reads %d\n",
1363 isc->current_read_bias, isc->write_stats.queued,
1364 isc->read_stats.queued);
1365 isc->current_read_bias--;
1366 /* We're not limiting writes, per se, just doing reads first */
1367 return NULL;
1368 }
1369
1370 /*
1371 * See if our current limiter allows this I/O.
1372 */
1373 if (cam_iosched_limiter_iop(&isc->write_stats, bp) != 0) {
1374 if (iosched_debug)
1375 printf("Can't write because limiter says no.\n");
1376 isc->write_stats.state_flags |= IOP_RATE_LIMITED;
1377 return NULL;
1378 }
1379
1380 /*
1381 * Let's do this: We've passed all the gates and we're a go
1382 * to schedule the I/O in the SIM.
1383 */
1384 isc->current_read_bias = isc->read_bias;
1385 bioq_remove(&isc->write_queue, bp);
1386 if (bp->bio_cmd == BIO_WRITE) {
1387 isc->write_stats.queued--;
1388 isc->write_stats.total++;
1389 isc->write_stats.pending++;
1390 }
1391 if (iosched_debug > 9)
1392 printf("HWQ : %p %#x\n", bp, bp->bio_cmd);
1393 isc->write_stats.state_flags &= ~IOP_RATE_LIMITED;
1394 return bp;
1395 }
1396 #endif
1397
1398 /*
1399 * Put back a trim that you weren't able to actually schedule this time.
1400 */
1401 void
1402 cam_iosched_put_back_trim(struct cam_iosched_softc *isc, struct bio *bp)
1403 {
1404 bioq_insert_head(&isc->trim_queue, bp);
1405 if (isc->queued_trims == 0)
1406 isc->last_trim_tick = ticks;
1407 isc->queued_trims++;
1408 #ifdef CAM_IOSCHED_DYNAMIC
1409 isc->trim_stats.queued++;
1410 isc->trim_stats.total--; /* since we put it back, don't double count */
1411 isc->trim_stats.pending--;
1412 #endif
1413 }
1414
1415 /*
1416 * gets the next trim from the trim queue.
1417 *
1418 * Assumes we're called with the periph lock held. It removes this
1419 * trim from the queue and the device must explicitly reinsert it
1420 * should the need arise.
1421 */
1422 struct bio *
1423 cam_iosched_next_trim(struct cam_iosched_softc *isc)
1424 {
1425 struct bio *bp;
1426
1427 bp = bioq_first(&isc->trim_queue);
1428 if (bp == NULL)
1429 return NULL;
1430 bioq_remove(&isc->trim_queue, bp);
1431 isc->queued_trims--;
1432 isc->last_trim_tick = ticks; /* Reset the tick timer when we take trims */
1433 #ifdef CAM_IOSCHED_DYNAMIC
1434 isc->trim_stats.queued--;
1435 isc->trim_stats.total++;
1436 isc->trim_stats.pending++;
1437 #endif
1438 return bp;
1439 }
1440
1441 /*
1442 * gets an available trim from the trim queue, if there's no trim
1443 * already pending. It removes this trim from the queue and the device
1444 * must explicitly reinsert it should the need arise.
1445 *
1446 * Assumes we're called with the periph lock held.
1447 */
1448 struct bio *
1449 cam_iosched_get_trim(struct cam_iosched_softc *isc)
1450 {
1451 #ifdef CAM_IOSCHED_DYNAMIC
1452 struct bio *bp;
1453 #endif
1454
1455 if (!cam_iosched_has_more_trim(isc))
1456 return NULL;
1457 #ifdef CAM_IOSCHED_DYNAMIC
1458 bp = bioq_first(&isc->trim_queue);
1459 if (bp == NULL)
1460 return NULL;
1461
1462 /*
1463 * If pending read, prefer that based on current read bias setting. The
1464 * read bias is shared for both writes and TRIMs, but on TRIMs the bias
1465 * is for a combined TRIM not a single TRIM request that's come in.
1466 */
1467 if (do_dynamic_iosched) {
1468 if (bioq_first(&isc->bio_queue) && isc->current_read_bias) {
1469 if (iosched_debug)
1470 printf("Reads present and current_read_bias is %d"
1471 " queued trims %d queued reads %d\n",
1472 isc->current_read_bias, isc->trim_stats.queued,
1473 isc->read_stats.queued);
1474 isc->current_read_bias--;
1475 /* We're not limiting TRIMS, per se, just doing reads first */
1476 return NULL;
1477 }
1478 /*
1479 * We're going to do a trim, so reset the bias.
1480 */
1481 isc->current_read_bias = isc->read_bias;
1482 }
1483
1484 /*
1485 * See if our current limiter allows this I/O. Because we only call this
1486 * here, and not in next_trim, the 'bandwidth' limits for trims won't
1487 * work, while the iops or max queued limits will work. It's tricky
1488 * because we want the limits to be from the perspective of the
1489 * "commands sent to the device." To make iops work, we need to check
1490 * only here (since we want all the ops we combine to count as one). To
1491 * make bw limits work, we'd need to check in next_trim, but that would
1492 * have the effect of limiting the iops as seen from the upper layers.
1493 */
1494 if (cam_iosched_limiter_iop(&isc->trim_stats, bp) != 0) {
1495 if (iosched_debug)
1496 printf("Can't trim because limiter says no.\n");
1497 isc->trim_stats.state_flags |= IOP_RATE_LIMITED;
1498 return NULL;
1499 }
1500 isc->current_read_bias = isc->read_bias;
1501 isc->trim_stats.state_flags &= ~IOP_RATE_LIMITED;
1502 /* cam_iosched_next_trim below keeps proper book */
1503 #endif
1504 return cam_iosched_next_trim(isc);
1505 }
1506
1507
1508 #ifdef CAM_IOSCHED_DYNAMIC
1509 static struct bio *
1510 bio_next(struct bio *bp)
1511 {
1512 bp = TAILQ_NEXT(bp, bio_queue);
1513 /*
1514 * After the first command, the ordered bit terminates
1515 * our search because BIO_ORDERED acts like a barrier.
1516 */
1517 if (bp == NULL || bp->bio_flags & BIO_ORDERED)
1518 return NULL;
1519 return bp;
1520 }
1521
1522 static bool
1523 cam_iosched_rate_limited(struct iop_stats *ios)
1524 {
1525 return ios->state_flags & IOP_RATE_LIMITED;
1526 }
1527 #endif
1528
1529 /*
1530 * Determine what the next bit of work to do is for the periph. The
1531 * default implementation looks to see if we have trims to do, but no
1532 * trims outstanding. If so, we do that. Otherwise we see if we have
1533 * other work. If we do, then we do that. Otherwise why were we called?
1534 */
1535 struct bio *
1536 cam_iosched_next_bio(struct cam_iosched_softc *isc)
1537 {
1538 struct bio *bp;
1539
1540 /*
1541 * See if we have a trim that can be scheduled. We can only send one
1542 * at a time down, so this takes that into account.
1543 *
1544 * XXX newer TRIM commands are queueable. Revisit this when we
1545 * implement them.
1546 */
1547 if ((bp = cam_iosched_get_trim(isc)) != NULL)
1548 return bp;
1549
1550 #ifdef CAM_IOSCHED_DYNAMIC
1551 /*
1552 * See if we have any pending writes, room in the queue for them,
1553 * and no pending reads (unless we've scheduled too many).
1554 * If so, those are next.
1555 */
1556 if (do_dynamic_iosched) {
1557 if ((bp = cam_iosched_get_write(isc)) != NULL)
1558 return bp;
1559 }
1560 #endif
1561 /*
1562 * Next, see if there's other, normal I/O waiting. If so, return that.
1563 */
1564 #ifdef CAM_IOSCHED_DYNAMIC
1565 if (do_dynamic_iosched) {
1566 for (bp = bioq_first(&isc->bio_queue); bp != NULL;
1567 bp = bio_next(bp)) {
1568 /*
1569 * For the dynamic scheduler with a read bias, bio_queue
1570 * is only for reads. However, without one, all
1571 * operations are queued. Enforce limits here for any
1572 * operation we find here.
1573 */
1574 if (bp->bio_cmd == BIO_READ) {
1575 if (cam_iosched_rate_limited(&isc->read_stats) ||
1576 cam_iosched_limiter_iop(&isc->read_stats, bp) != 0) {
1577 isc->read_stats.state_flags |= IOP_RATE_LIMITED;
1578 continue;
1579 }
1580 isc->read_stats.state_flags &= ~IOP_RATE_LIMITED;
1581 }
1582 /*
1583 * There can only be write requests on the queue when
1584 * the read bias is 0, but we need to process them
1585 * here. We do not assert for read bias == 0, however,
1586 * since it is dynamic and we can have WRITE operations
1587 * in the queue after we transition from 0 to non-zero.
1588 */
1589 if (bp->bio_cmd == BIO_WRITE) {
1590 if (cam_iosched_rate_limited(&isc->write_stats) ||
1591 cam_iosched_limiter_iop(&isc->write_stats, bp) != 0) {
1592 isc->write_stats.state_flags |= IOP_RATE_LIMITED;
1593 continue;
1594 }
1595 isc->write_stats.state_flags &= ~IOP_RATE_LIMITED;
1596 }
1597 /*
1598 * here we know we have a bp that's != NULL, that's not rate limited
1599 * and can be the next I/O.
1600 */
1601 break;
1602 }
1603 } else
1604 #endif
1605 bp = bioq_first(&isc->bio_queue);
1606
1607 if (bp == NULL)
1608 return (NULL);
1609 bioq_remove(&isc->bio_queue, bp);
1610 #ifdef CAM_IOSCHED_DYNAMIC
1611 if (do_dynamic_iosched) {
1612 if (bp->bio_cmd == BIO_READ) {
1613 isc->read_stats.queued--;
1614 isc->read_stats.total++;
1615 isc->read_stats.pending++;
1616 } else if (bp->bio_cmd == BIO_WRITE) {
1617 isc->write_stats.queued--;
1618 isc->write_stats.total++;
1619 isc->write_stats.pending++;
1620 }
1621 }
1622 if (iosched_debug > 9)
1623 printf("HWQ : %p %#x\n", bp, bp->bio_cmd);
1624 #endif
1625 return bp;
1626 }
1627
1628 /*
1629 * Driver has been given some work to do by the block layer. Tell the
1630 * scheduler about it and have it queue the work up. The scheduler module
1631 * will then return the currently most useful bit of work later, possibly
1632 * deferring work for various reasons.
1633 */
1634 void
1635 cam_iosched_queue_work(struct cam_iosched_softc *isc, struct bio *bp)
1636 {
1637
1638 /*
1639 * A BIO_SPEEDUP from the upper layers means that they have a block
1640 * shortage. At present, this is only sent when we're trying to
1641 * allocate blocks, but have a shortage before giving up. bio_length is
1642 * the size of their shortage. We will complete just enough BIO_DELETEs
1643 * in the queue to satisfy the need. If bio_length is 0, we'll complete
1644 * them all. This allows the scheduler to delay BIO_DELETEs to improve
1645 * read/write performance without worrying about the upper layers. When
1646 * it's possibly a problem, we respond by pretending the BIO_DELETEs
1647 * just worked. We can't do anything about the BIO_DELETEs in the
1648 * hardware, though. We have to wait for them to complete.
1649 */
1650 if (bp->bio_cmd == BIO_SPEEDUP) {
1651 off_t len;
1652 struct bio *nbp;
1653
1654 len = 0;
1655 while (bioq_first(&isc->trim_queue) &&
1656 (bp->bio_length == 0 || len < bp->bio_length)) {
1657 nbp = bioq_takefirst(&isc->trim_queue);
1658 len += nbp->bio_length;
1659 nbp->bio_error = 0;
1660 biodone(nbp);
1661 }
1662 if (bp->bio_length > 0) {
1663 if (bp->bio_length > len)
1664 bp->bio_resid = bp->bio_length - len;
1665 else
1666 bp->bio_resid = 0;
1667 }
1668 bp->bio_error = 0;
1669 biodone(bp);
1670 return;
1671 }
1672
1673 /*
1674 * If we get a BIO_FLUSH, and we're doing delayed BIO_DELETEs then we
1675 * set the last tick time to one less than the current ticks minus the
1676 * delay to force the BIO_DELETEs to be presented to the client driver.
1677 */
1678 if (bp->bio_cmd == BIO_FLUSH && isc->trim_ticks > 0)
1679 isc->last_trim_tick = ticks - isc->trim_ticks - 1;
1680
1681 /*
1682 * Put all trims on the trim queue. Otherwise put the work on the bio
1683 * queue.
1684 */
1685 if (bp->bio_cmd == BIO_DELETE) {
1686 bioq_insert_tail(&isc->trim_queue, bp);
1687 if (isc->queued_trims == 0)
1688 isc->last_trim_tick = ticks;
1689 isc->queued_trims++;
1690 #ifdef CAM_IOSCHED_DYNAMIC
1691 isc->trim_stats.in++;
1692 isc->trim_stats.queued++;
1693 #endif
1694 }
1695 #ifdef CAM_IOSCHED_DYNAMIC
1696 else if (do_dynamic_iosched && isc->read_bias != 0 &&
1697 (bp->bio_cmd != BIO_READ)) {
1698 if (cam_iosched_sort_queue(isc))
1699 bioq_disksort(&isc->write_queue, bp);
1700 else
1701 bioq_insert_tail(&isc->write_queue, bp);
1702 if (iosched_debug > 9)
1703 printf("Qw : %p %#x\n", bp, bp->bio_cmd);
1704 if (bp->bio_cmd == BIO_WRITE) {
1705 isc->write_stats.in++;
1706 isc->write_stats.queued++;
1707 }
1708 }
1709 #endif
1710 else {
1711 if (cam_iosched_sort_queue(isc))
1712 bioq_disksort(&isc->bio_queue, bp);
1713 else
1714 bioq_insert_tail(&isc->bio_queue, bp);
1715 #ifdef CAM_IOSCHED_DYNAMIC
1716 if (iosched_debug > 9)
1717 printf("Qr : %p %#x\n", bp, bp->bio_cmd);
1718 if (bp->bio_cmd == BIO_READ) {
1719 isc->read_stats.in++;
1720 isc->read_stats.queued++;
1721 } else if (bp->bio_cmd == BIO_WRITE) {
1722 isc->write_stats.in++;
1723 isc->write_stats.queued++;
1724 }
1725 #endif
1726 }
1727 }
1728
1729 /*
1730 * If we have work, get it scheduled. Called with the periph lock held.
1731 */
1732 void
1733 cam_iosched_schedule(struct cam_iosched_softc *isc, struct cam_periph *periph)
1734 {
1735
1736 if (cam_iosched_has_work(isc))
1737 xpt_schedule(periph, CAM_PRIORITY_NORMAL);
1738 }
1739
1740 /*
1741 * Complete a trim request. Mark that we no longer have one in flight.
1742 */
1743 void
1744 cam_iosched_trim_done(struct cam_iosched_softc *isc)
1745 {
1746
1747 isc->flags &= ~CAM_IOSCHED_FLAG_TRIM_ACTIVE;
1748 }
1749
1750 /*
1751 * Complete a bio. Called before we release the ccb with xpt_release_ccb so we
1752 * might use notes in the ccb for statistics.
1753 */
1754 int
1755 cam_iosched_bio_complete(struct cam_iosched_softc *isc, struct bio *bp,
1756 union ccb *done_ccb)
1757 {
1758 int retval = 0;
1759 #ifdef CAM_IOSCHED_DYNAMIC
1760 if (!do_dynamic_iosched)
1761 return retval;
1762
1763 if (iosched_debug > 10)
1764 printf("done: %p %#x\n", bp, bp->bio_cmd);
1765 if (bp->bio_cmd == BIO_WRITE) {
1766 retval = cam_iosched_limiter_iodone(&isc->write_stats, bp);
1767 if ((bp->bio_flags & BIO_ERROR) != 0)
1768 isc->write_stats.errs++;
1769 isc->write_stats.out++;
1770 isc->write_stats.pending--;
1771 } else if (bp->bio_cmd == BIO_READ) {
1772 retval = cam_iosched_limiter_iodone(&isc->read_stats, bp);
1773 if ((bp->bio_flags & BIO_ERROR) != 0)
1774 isc->read_stats.errs++;
1775 isc->read_stats.out++;
1776 isc->read_stats.pending--;
1777 } else if (bp->bio_cmd == BIO_DELETE) {
1778 if ((bp->bio_flags & BIO_ERROR) != 0)
1779 isc->trim_stats.errs++;
1780 isc->trim_stats.out++;
1781 isc->trim_stats.pending--;
1782 } else if (bp->bio_cmd != BIO_FLUSH) {
1783 if (iosched_debug)
1784 printf("Completing command with bio_cmd == %#x\n", bp->bio_cmd);
1785 }
1786
1787 if ((bp->bio_flags & BIO_ERROR) == 0 && done_ccb != NULL &&
1788 (done_ccb->ccb_h.status & CAM_QOS_VALID) != 0) {
1789 sbintime_t sim_latency;
1790
1791 sim_latency = cam_iosched_sbintime_t(done_ccb->ccb_h.qos.periph_data);
1792
1793 cam_iosched_io_metric_update(isc, sim_latency,
1794 bp->bio_cmd, bp->bio_bcount);
1795 /*
1796 * Debugging code: allow callbacks to the periph driver when latency max
1797 * is exceeded. This can be useful for triggering external debugging actions.
1798 */
1799 if (isc->latfcn && isc->max_lat != 0 && sim_latency > isc->max_lat)
1800 isc->latfcn(isc->latarg, sim_latency, bp);
1801 }
1802
1803 #endif
1804 return retval;
1805 }
1806
1807 /*
1808  * Tell the I/O scheduler that you've pushed a trim down into the sim.
1809 * This also tells the I/O scheduler not to push any more trims down, so
1810 * some periphs do not call it if they can cope with multiple trims in flight.
1811 */
1812 void
1813 cam_iosched_submit_trim(struct cam_iosched_softc *isc)
1814 {
1815
1816 isc->flags |= CAM_IOSCHED_FLAG_TRIM_ACTIVE;
1817 }
1818
1819 /*
1820 * Change the sorting policy hint for I/O transactions for this device.
1821 */
1822 void
1823 cam_iosched_set_sort_queue(struct cam_iosched_softc *isc, int val)
1824 {
1825
1826 isc->sort_io_queue = val;
1827 }
1828
1829 int
1830 cam_iosched_has_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
1831 {
1832 return isc->flags & flags;
1833 }
1834
1835 void
1836 cam_iosched_set_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
1837 {
1838 isc->flags |= flags;
1839 }
1840
1841 void
1842 cam_iosched_clr_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
1843 {
1844 isc->flags &= ~flags;
1845 }
1846
1847 #ifdef CAM_IOSCHED_DYNAMIC
1848 /*
1849 * After the method presented in Jack Crenshaw's 1998 article "Integer
1850 * Square Roots," reprinted at
1851 * http://www.embedded.com/electronics-blogs/programmer-s-toolbox/4219659/Integer-Square-Roots
1852  * and well worth the read. Briefly, we find the largest power of 4
1853  * that does not exceed val. We then check each smaller power of 4 to
1854  * see if val is still bigger. The right shifts at each step divide
1855 * the result by 2 which after successive application winds up
1856 * accumulating the right answer. It could also have been accumulated
1857 * using a separate root counter, but this code is smaller and faster
1858 * than that method. This method is also integer size invariant.
1859 * It returns floor(sqrt((float)val)), or the largest integer less than
1860 * or equal to the square root.
1861 */
1862 static uint64_t
1863 isqrt64(uint64_t val)
1864 {
1865 uint64_t res = 0;
1866 uint64_t bit = 1ULL << (sizeof(uint64_t) * NBBY - 2);
1867
1868 /*
1869 	 * Find the largest power of 4 no larger than val.
1870 */
1871 while (bit > val)
1872 bit >>= 2;
1873
1874 /*
1875 * Accumulate the answer, one bit at a time (we keep moving
1876 * them over since 2 is the square root of 4 and we test
1877 * powers of 4). We accumulate where we find the bit, but
1878 * the successive shifts land the bit in the right place
1879 * by the end.
1880 */
1881 while (bit != 0) {
1882 if (val >= res + bit) {
1883 val -= res + bit;
1884 res = (res >> 1) + bit;
1885 } else
1886 res >>= 1;
1887 bit >>= 2;
1888 }
1889
1890 return res;
1891 }
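
The defining invariant of the routine is res * res <= val < (res + 1) * (res + 1). A user-space harness (hypothetical, not part of this file) that checks a copy of the routine against that invariant, with CHAR_BIT standing in for the kernel's NBBY:

#include <assert.h>
#include <limits.h>	/* CHAR_BIT */
#include <stdint.h>
#include <stdio.h>

/* User-space copy of isqrt64() above. */
static uint64_t
isqrt64(uint64_t val)
{
	uint64_t res = 0;
	uint64_t bit = 1ULL << (sizeof(uint64_t) * CHAR_BIT - 2);

	while (bit > val)
		bit >>= 2;
	while (bit != 0) {
		if (val >= res + bit) {
			val -= res + bit;
			res = (res >> 1) + bit;
		} else
			res >>= 1;
		bit >>= 2;
	}
	return res;
}

int
main(void)
{
	uint64_t samples[] = { 0, 1, 2, 3, 4, 99, 100, 1ULL << 40, UINT64_MAX };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t v = samples[i];
		uint64_t r = isqrt64(v);

		/* floor(sqrt(v)): r^2 <= v < (r+1)^2, guarding the overflow case. */
		assert(r * r <= v);
		assert(r >= UINT32_MAX || (r + 1) * (r + 1) > v);
		printf("isqrt64(%ju) = %ju\n", (uintmax_t)v, (uintmax_t)r);
	}
	return 0;
}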
1892
1893 static sbintime_t latencies[LAT_BUCKETS - 1] = {
1894 BUCKET_BASE << 0, /* 20us */
1895 BUCKET_BASE << 1,
1896 BUCKET_BASE << 2,
1897 BUCKET_BASE << 3,
1898 BUCKET_BASE << 4,
1899 BUCKET_BASE << 5,
1900 BUCKET_BASE << 6,
1901 BUCKET_BASE << 7,
1902 BUCKET_BASE << 8,
1903 BUCKET_BASE << 9,
1904 BUCKET_BASE << 10,
1905 BUCKET_BASE << 11,
1906 BUCKET_BASE << 12,
1907 BUCKET_BASE << 13,
1908 BUCKET_BASE << 14,
1909 BUCKET_BASE << 15,
1910 BUCKET_BASE << 16,
1911 BUCKET_BASE << 17,
1912 BUCKET_BASE << 18 /* 5,242,880us */
1913 };
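/*
 * For example, with BUCKET_BASE = 20us, a 100us I/O is counted in
 * iop->latencies[3]: the scan below finds the first bound the latency is
 * under, and 20us << 2 = 80us <= 100us < 20us << 3 = 160us.
 */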
1914
1915 static void
1916 cam_iosched_update(struct iop_stats *iop, sbintime_t sim_latency)
1917 {
1918 sbintime_t y, deltasq, delta;
1919 int i;
1920
1921 /*
1922 * Keep counts for latency. We do it by power of two buckets.
1923 * This helps us spot outlier behavior obscured by averages.
1924 */
1925 for (i = 0; i < LAT_BUCKETS - 1; i++) {
1926 if (sim_latency < latencies[i]) {
1927 iop->latencies[i]++;
1928 break;
1929 }
1930 }
1931 if (i == LAT_BUCKETS - 1)
1932 		iop->latencies[i]++; 	/* Values >= 5,242,880us land in the last bucket. */
1933
1934 /*
1935 * Classic exponentially decaying average with a tiny alpha
1936 * (2 ^ -alpha_bits). For more info see the NIST statistical
1937 * handbook.
1938 *
1939 * ema_t = y_t * alpha + ema_t-1 * (1 - alpha) [nist]
1940 * ema_t = y_t * alpha + ema_t-1 - alpha * ema_t-1
1941 * ema_t = alpha * y_t - alpha * ema_t-1 + ema_t-1
1942 * alpha = 1 / (1 << alpha_bits)
1943 * sub e == ema_t-1, b == 1/alpha (== 1 << alpha_bits), d == y_t - ema_t-1
1944 * = y_t/b - e/b + be/b
1945 * = (y_t - e + be) / b
1946 	 * 	 = (be + d) / b
1947 *
1948 * Since alpha is a power of two, we can compute this w/o any mult or
1949 * division.
1950 *
1951 	 * Variance can also be computed. Usually, it would be expressed as follows:
1952 	 *	diff_t = y_t - ema_t-1
1953 	 *	emvar_t = (1 - alpha) * (emvar_t-1 + diff_t^2 * alpha)
1954 	 *	  = emvar_t-1 - alpha * emvar_t-1 + diff_t^2 * alpha - (diff_t * alpha)^2
1955 	 *	sub b == 1/alpha (== 1 << alpha_bits), e == emvar_t-1, d == diff_t
1956 	 *	  = e - e/b + dd/b + dd/bb (the exact last term is -dd/bb; the sign difference is negligible)
1957 	 *	  = (bbe - be + bdd + dd) / bb
1958 	 *	  = (bbe + b(dd - e) + dd) / bb (which is expanded below, bb = 1 << (2 * alpha_bits))
1959 */
1960 /*
1961 * XXX possible numeric issues
1962 	 * o We assume right-shifted signed integers do the right thing, since that's
1963 	 *   implementation-defined. You can change the right shifts to / (1LL << alpha_bits).
1964 	 * o alpha_bits = 9 gives a ceiling of 23 bits of seconds for ema and 14 bits
1965 	 *   for emvar. This puts a ceiling of 13 bits on alpha_bits since we need a
1966 	 *   few tens of seconds of representation.
1967 	 * o We mitigate these issues by never setting alpha_bits too high.
1968 */
1969 y = sim_latency;
1970 delta = (y - iop->ema); /* d */
1971 iop->ema = ((iop->ema << alpha_bits) + delta) >> alpha_bits;
1972
1973 /*
1974 	 * Were we to naively plow ahead at this point, we wind up with many numerical
1975 	 * issues making any SD > ~3ms unreliable. So, we shift right by 12. This leaves
1976 	 * us with microsecond level precision in the input, so the same in the
1977 	 * output. It means we can't overflow deltasq unless delta > 4k seconds. It
1978 	 * also means that emvar can be up to 46 bits, 40 of which are fraction, which
1979 	 * gives us a way to measure up to ~8s in the SD before the computation goes
1980 	 * unstable. Even the worst hard disk rarely has > 1s service time in the
1981 	 * drive. It does mean we have to shift left 12 bits after taking the
1982 	 * square root to compute the actual standard deviation estimate. This loss of
1983 	 * precision is preferable to needing int128 types to work. The above numbers
1984 	 * assume alpha_bits = 9. 10 or 11 are OK, but we start to run into issues at
1985 	 * 12, so 12 or 13 is OK for the EMA, but EMVAR and SD will be wrong there.
1986 */
1987 delta >>= 12;
1988 deltasq = delta * delta; /* dd */
1989 iop->emvar = ((iop->emvar << (2 * alpha_bits)) + /* bbe */
1990 ((deltasq - iop->emvar) << alpha_bits) + /* b(dd-e) */
1991 deltasq) /* dd */
1992 >> (2 * alpha_bits); /* div bb */
1993 iop->sd = (sbintime_t)isqrt64((uint64_t)iop->emvar) << 12;
1994 }
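
To make the shift arithmetic concrete, here is a minimal user-space model of the same EMA/EMVAR update, checked against the floating-point recurrence. It assumes the default alpha_bits = 9 and models sbintime_t as a 64-bit count with 32 fraction bits; ema_state, ema_update, and the constants are hypothetical names for illustration, not kernel API.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define ALPHA_BITS	9		/* alpha = 2^-9, the default above */
#define SBT_1S		(1LL << 32)	/* one second: 32 fraction bits */

struct ema_state {
	int64_t ema;	/* exponentially weighted moving average */
	int64_t emvar;	/* moving variance, carrying 12 fewer fraction bits */
};

static void
ema_update(struct ema_state *st, int64_t y)
{
	int64_t delta, deltasq;

	delta = y - st->ema;						/* d */
	st->ema = ((st->ema << ALPHA_BITS) + delta) >> ALPHA_BITS;	/* (be + d) / b */

	delta >>= 12;		/* drop 12 fraction bits so delta * delta can't overflow */
	deltasq = delta * delta;					/* dd */
	st->emvar = ((st->emvar << (2 * ALPHA_BITS)) +			/* bbe */
	    ((deltasq - st->emvar) << ALPHA_BITS) +			/* b(dd - e) */
	    deltasq) >> (2 * ALPHA_BITS);				/* / bb */
}

int
main(void)
{
	struct ema_state st = { 0, 0 };
	double fema = 0.0, alpha = 1.0 / (1 << ALPHA_BITS);
	int64_t sd;
	int i;

	/* Feed 10000 constant 500us samples; both EMAs converge to 500us. */
	for (i = 0; i < 10000; i++) {
		int64_t y = SBT_1S / 2000;	/* 500us as a binary time */

		ema_update(&st, y);
		fema += alpha * ((double)y - fema);
	}
	sd = (int64_t)sqrt((double)st.emvar) << 12;	/* undo the >> 12 above */
	printf("ema %.3fus (float %.3fus), sd %.3fus\n",
	    st.ema * 1e6 / SBT_1S, fema * 1e6 / SBT_1S, sd * 1e6 / SBT_1S);
	return 0;
}

With a constant input the variance term decays toward zero, so the printed sd should approach 0 while both averages settle at 500us, mirroring the kernel computation shift for shift.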
1995
1996 static void
1997 cam_iosched_io_metric_update(struct cam_iosched_softc *isc,
1998 sbintime_t sim_latency, int cmd, size_t size)
1999 {
2000 /* xxx Do we need to scale based on the size of the I/O ? */
2001 switch (cmd) {
2002 case BIO_READ:
2003 cam_iosched_update(&isc->read_stats, sim_latency);
2004 break;
2005 case BIO_WRITE:
2006 cam_iosched_update(&isc->write_stats, sim_latency);
2007 break;
2008 case BIO_DELETE:
2009 cam_iosched_update(&isc->trim_stats, sim_latency);
2010 break;
2011 default:
2012 break;
2013 }
2014 }
2015
2016 #ifdef DDB
2017 static int biolen(struct bio_queue_head *bq)
2018 {
2019 int i = 0;
2020 struct bio *bp;
2021
2022 TAILQ_FOREACH(bp, &bq->queue, bio_queue) {
2023 i++;
2024 }
2025 return i;
2026 }
2027
2028 /*
2029 * Show the internal state of the I/O scheduler.
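 * Invoked from ddb as "show iosched <addr>", where addr is the address of
 * the device's cam_iosched_softc (hence the have_addr check below).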
2030 */
2031 DB_SHOW_COMMAND(iosched, cam_iosched_db_show)
2032 {
2033 struct cam_iosched_softc *isc;
2034
2035 if (!have_addr) {
2036 db_printf("Need addr\n");
2037 return;
2038 }
2039 isc = (struct cam_iosched_softc *)addr;
2040 db_printf("pending_reads: %d\n", isc->read_stats.pending);
2041 db_printf("min_reads: %d\n", isc->read_stats.min);
2042 db_printf("max_reads: %d\n", isc->read_stats.max);
2043 db_printf("reads: %d\n", isc->read_stats.total);
2044 db_printf("in_reads: %d\n", isc->read_stats.in);
2045 db_printf("out_reads: %d\n", isc->read_stats.out);
2046 db_printf("queued_reads: %d\n", isc->read_stats.queued);
2047 db_printf("Read Q len %d\n", biolen(&isc->bio_queue));
2048 db_printf("pending_writes: %d\n", isc->write_stats.pending);
2049 db_printf("min_writes: %d\n", isc->write_stats.min);
2050 db_printf("max_writes: %d\n", isc->write_stats.max);
2051 db_printf("writes: %d\n", isc->write_stats.total);
2052 db_printf("in_writes: %d\n", isc->write_stats.in);
2053 db_printf("out_writes: %d\n", isc->write_stats.out);
2054 db_printf("queued_writes: %d\n", isc->write_stats.queued);
2055 db_printf("Write Q len %d\n", biolen(&isc->write_queue));
2056 db_printf("pending_trims: %d\n", isc->trim_stats.pending);
2057 db_printf("min_trims: %d\n", isc->trim_stats.min);
2058 db_printf("max_trims: %d\n", isc->trim_stats.max);
2059 db_printf("trims: %d\n", isc->trim_stats.total);
2060 db_printf("in_trims: %d\n", isc->trim_stats.in);
2061 db_printf("out_trims: %d\n", isc->trim_stats.out);
2062 db_printf("queued_trims: %d\n", isc->trim_stats.queued);
2063 db_printf("Trim Q len %d\n", biolen(&isc->trim_queue));
2064 db_printf("read_bias: %d\n", isc->read_bias);
2065 db_printf("current_read_bias: %d\n", isc->current_read_bias);
2066 db_printf("Trim active? %s\n",
2067 (isc->flags & CAM_IOSCHED_FLAG_TRIM_ACTIVE) ? "yes" : "no");
2068 }
2069 #endif
2070 #endif