FreeBSD/Linux Kernel Cross Reference
sys/block/as-iosched.c


    1 /*
    2  *  Anticipatory & deadline i/o scheduler.
    3  *
    4  *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
    5  *                     Nick Piggin <nickpiggin@yahoo.com.au>
    6  *
    7  */
    8 #include <linux/kernel.h>
    9 #include <linux/fs.h>
   10 #include <linux/blkdev.h>
   11 #include <linux/elevator.h>
   12 #include <linux/bio.h>
   13 #include <linux/module.h>
   14 #include <linux/slab.h>
   15 #include <linux/init.h>
   16 #include <linux/compiler.h>
   17 #include <linux/rbtree.h>
   18 #include <linux/interrupt.h>
   19 
   20 /*
   21  * See Documentation/block/as-iosched.txt
   22  */
   23 
   24 /*
   25  * max time before a read is submitted.
   26  */
   27 #define default_read_expire (HZ / 8)
   28 
   29 /*
    30  * ditto for writes. Like the read limit this is a soft target, not a
    31  * hard guarantee, even if the disk is capable of meeting it.
    32  */
   33 #define default_write_expire (HZ / 4)
   34 
   35 /*
   36  * read_batch_expire describes how long we will allow a stream of reads to
   37  * persist before looking to see whether it is time to switch over to writes.
   38  */
   39 #define default_read_batch_expire (HZ / 2)
   40 
   41 /*
   42  * write_batch_expire describes how long we want a stream of writes to run for.
    43  * This is not a hard limit, but a target for the write batch auto-tuning:
    44  * a large number of writes can be absorbed by the disk cache / TCQ in a
    45  * short amount of time, so the target is adjusted from observed behaviour.
    46  */
   47 #define default_write_batch_expire (HZ / 8)
   48 
   49 /*
   50  * max time we may wait to anticipate a read (default around 6ms)
   51  */
   52 #define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
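       /*
        * Worked example (assuming HZ == 1000, a common configuration): this
        * evaluates to 1000 / 150 == 6 jiffies, the ~6ms quoted above. With
        * HZ == 100 the division truncates to 0, so the conditional falls back
        * to a minimum of 1 jiffy (10ms).
        */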
   53 
   54 /*
   55  * Keep track of up to 20ms thinktimes. We can go as big as we like here,
   56  * however huge values tend to interfere and not decay fast enough. A program
   57  * might be in a non-io phase of operation. Waiting on user input for example,
   58  * or doing a lengthy computation. A small penalty can be justified there, and
   59  * will still catch out those processes that constantly have large thinktimes.
   60  */
   61 #define MAX_THINKTIME (HZ/50UL)
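       /*
        * For example, with HZ == 1000 (an assumed configuration) this is
        * 1000 / 50 == 20 jiffies, i.e. the 20ms mentioned above; with
        * HZ == 250 it is 5 jiffies.
        */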
   62 
   63 /* Bits in as_io_context.state */
   64 enum as_io_states {
   65         AS_TASK_RUNNING=0,      /* Process has not exited */
   66         AS_TASK_IOSTARTED,      /* Process has started some IO */
   67         AS_TASK_IORUNNING,      /* Process has completed some IO */
   68 };
   69 
   70 enum anticipation_status {
   71         ANTIC_OFF=0,            /* Not anticipating (normal operation)  */
   72         ANTIC_WAIT_REQ,         /* The last read has not yet completed  */
   73         ANTIC_WAIT_NEXT,        /* Currently anticipating a request vs
   74                                    last read (which has completed) */
   75         ANTIC_FINISHED,         /* Anticipating but have found a candidate
   76                                  * or timed out */
   77 };
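       /*
        * A rough sketch of the transitions as implemented below: ANTIC_OFF
        * moves to ANTIC_WAIT_REQ when anticipation starts while the last read
        * is still in flight (as_antic_waitreq), and to ANTIC_WAIT_NEXT once
        * that read has completed (as_antic_waitnext, also reached from
        * ANTIC_WAIT_REQ via as_completed_request). Either wait state becomes
        * ANTIC_FINISHED through as_antic_stop or the as_antic_timeout timer,
        * and as_move_to_dispatch resets the status to ANTIC_OFF.
        */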
   78 
   79 struct as_data {
   80         /*
   81          * run time data
   82          */
   83 
   84         struct request_queue *q;        /* the "owner" queue */
   85 
   86         /*
   87          * requests (as_rq s) are present on both sort_list and fifo_list
   88          */
   89         struct rb_root sort_list[2];
   90         struct list_head fifo_list[2];
   91 
   92         struct request *next_rq[2];     /* next in sort order */
   93         sector_t last_sector[2];        /* last SYNC & ASYNC sectors */
   94 
   95         unsigned long exit_prob;        /* probability a task will exit while
   96                                            being waited on */
    97         unsigned long exit_no_coop;     /* probability an exited task will
   98                                            not be part of a later cooperating
   99                                            request */
  100         unsigned long new_ttime_total;  /* mean thinktime on new proc */
  101         unsigned long new_ttime_mean;
  102         u64 new_seek_total;             /* mean seek on new proc */
  103         sector_t new_seek_mean;
  104 
  105         unsigned long current_batch_expires;
  106         unsigned long last_check_fifo[2];
  107         int changed_batch;              /* 1: waiting for old batch to end */
  108         int new_batch;                  /* 1: waiting on first read complete */
  109         int batch_data_dir;             /* current batch SYNC / ASYNC */
  110         int write_batch_count;          /* max # of reqs in a write batch */
  111         int current_write_count;        /* how many requests left this batch */
  112         int write_batch_idled;          /* has the write batch gone idle? */
  113 
  114         enum anticipation_status antic_status;
  115         unsigned long antic_start;      /* jiffies: when it started */
  116         struct timer_list antic_timer;  /* anticipatory scheduling timer */
  117         struct work_struct antic_work;  /* Deferred unplugging */
  118         struct io_context *io_context;  /* Identify the expected process */
  119         int ioc_finished; /* IO associated with io_context is finished */
  120         int nr_dispatched;
  121 
  122         /*
  123          * settings that change how the i/o scheduler behaves
  124          */
  125         unsigned long fifo_expire[2];
  126         unsigned long batch_expire[2];
  127         unsigned long antic_expire;
  128 };
  129 
  130 /*
  131  * per-request data.
  132  */
  133 enum arq_state {
  134         AS_RQ_NEW=0,            /* New - not referenced and not on any lists */
  135         AS_RQ_QUEUED,           /* In the request queue. It belongs to the
  136                                    scheduler */
  137         AS_RQ_DISPATCHED,       /* On the dispatch list. It belongs to the
  138                                    driver now */
  139         AS_RQ_PRESCHED,         /* Debug poisoning for requests being used */
  140         AS_RQ_REMOVED,
  141         AS_RQ_MERGED,
  142         AS_RQ_POSTSCHED,        /* when they shouldn't be */
  143 };
  144 
  145 #define RQ_IOC(rq)      ((struct io_context *) (rq)->elevator_private)
  146 #define RQ_STATE(rq)    ((enum arq_state)(rq)->elevator_private2)
  147 #define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state)
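       /*
        * The scheduler keeps its per-request state in the two opaque elevator
        * pointers of struct request: elevator_private holds the io_context
        * reference taken in as_add_request, and elevator_private2 carries the
        * arq_state value cast through void *.
        */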
  148 
  149 static DEFINE_PER_CPU(unsigned long, as_ioc_count);
  150 static struct completion *ioc_gone;
  151 static DEFINE_SPINLOCK(ioc_gone_lock);
  152 
  153 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
  154 static void as_antic_stop(struct as_data *ad);
  155 
  156 /*
  157  * IO Context helper functions
  158  */
  159 
  160 /* Called to deallocate the as_io_context */
  161 static void free_as_io_context(struct as_io_context *aic)
  162 {
  163         kfree(aic);
  164         elv_ioc_count_dec(as_ioc_count);
  165         if (ioc_gone) {
  166                 /*
  167                  * AS scheduler is exiting, grab exit lock and check
  168                  * the pending io context count. If it hits zero,
  169                  * complete ioc_gone and set it back to NULL.
  170                  */
  171                 spin_lock(&ioc_gone_lock);
  172                 if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
  173                         complete(ioc_gone);
  174                         ioc_gone = NULL;
  175                 }
  176                 spin_unlock(&ioc_gone_lock);
  177         }
  178 }
  179 
  180 static void as_trim(struct io_context *ioc)
  181 {
  182         spin_lock_irq(&ioc->lock);
  183         if (ioc->aic)
  184                 free_as_io_context(ioc->aic);
  185         ioc->aic = NULL;
  186         spin_unlock_irq(&ioc->lock);
  187 }
  188 
  189 /* Called when the task exits */
  190 static void exit_as_io_context(struct as_io_context *aic)
  191 {
  192         WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
  193         clear_bit(AS_TASK_RUNNING, &aic->state);
  194 }
  195 
  196 static struct as_io_context *alloc_as_io_context(void)
  197 {
  198         struct as_io_context *ret;
  199 
  200         ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
  201         if (ret) {
  202                 ret->dtor = free_as_io_context;
  203                 ret->exit = exit_as_io_context;
  204                 ret->state = 1 << AS_TASK_RUNNING;
  205                 atomic_set(&ret->nr_queued, 0);
  206                 atomic_set(&ret->nr_dispatched, 0);
  207                 spin_lock_init(&ret->lock);
  208                 ret->ttime_total = 0;
  209                 ret->ttime_samples = 0;
  210                 ret->ttime_mean = 0;
  211                 ret->seek_total = 0;
  212                 ret->seek_samples = 0;
  213                 ret->seek_mean = 0;
  214                 elv_ioc_count_inc(as_ioc_count);
  215         }
  216 
  217         return ret;
  218 }
  219 
  220 /*
  221  * If the current task has no AS IO context then create one and initialise it.
  222  * Then take a ref on the task's io context and return it.
  223  */
  224 static struct io_context *as_get_io_context(int node)
  225 {
  226         struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
  227         if (ioc && !ioc->aic) {
  228                 ioc->aic = alloc_as_io_context();
  229                 if (!ioc->aic) {
  230                         put_io_context(ioc);
  231                         ioc = NULL;
  232                 }
  233         }
  234         return ioc;
  235 }
  236 
  237 static void as_put_io_context(struct request *rq)
  238 {
  239         struct as_io_context *aic;
  240 
  241         if (unlikely(!RQ_IOC(rq)))
  242                 return;
  243 
  244         aic = RQ_IOC(rq)->aic;
  245 
  246         if (rq_is_sync(rq) && aic) {
  247                 unsigned long flags;
  248 
  249                 spin_lock_irqsave(&aic->lock, flags);
  250                 set_bit(AS_TASK_IORUNNING, &aic->state);
  251                 aic->last_end_request = jiffies;
  252                 spin_unlock_irqrestore(&aic->lock, flags);
  253         }
  254 
  255         put_io_context(RQ_IOC(rq));
  256 }
  257 
  258 /*
  259  * rb tree support functions
  260  */
  261 #define RQ_RB_ROOT(ad, rq)      (&(ad)->sort_list[rq_is_sync((rq))])
  262 
  263 static void as_add_rq_rb(struct as_data *ad, struct request *rq)
  264 {
  265         struct request *alias;
  266 
  267         while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
  268                 as_move_to_dispatch(ad, alias);
  269                 as_antic_stop(ad);
  270         }
  271 }
  272 
  273 static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
  274 {
  275         elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
  276 }
  277 
  278 /*
  279  * IO Scheduler proper
  280  */
  281 
  282 #define MAXBACK (1024 * 1024)   /*
  283                                  * Maximum distance the disk will go backward
  284                                  * for a request.
  285                                  */
  286 
  287 #define BACK_PENALTY    2
  288 
  289 /*
  290  * as_choose_req selects the preferred one of two requests of the same data_dir
  291  * ignoring time - eg. timeouts, which is the job of as_dispatch_request
  292  */
  293 static struct request *
  294 as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
  295 {
  296         int data_dir;
  297         sector_t last, s1, s2, d1, d2;
  298         int r1_wrap=0, r2_wrap=0;       /* requests are behind the disk head */
  299         const sector_t maxback = MAXBACK;
  300 
  301         if (rq1 == NULL || rq1 == rq2)
  302                 return rq2;
  303         if (rq2 == NULL)
  304                 return rq1;
  305 
  306         data_dir = rq_is_sync(rq1);
  307 
  308         last = ad->last_sector[data_dir];
  309         s1 = blk_rq_pos(rq1);
  310         s2 = blk_rq_pos(rq2);
  311 
  312         BUG_ON(data_dir != rq_is_sync(rq2));
  313 
  314         /*
  315          * Strict one way elevator _except_ in the case where we allow
  316          * short backward seeks which are biased as twice the cost of a
  317          * similar forward seek.
  318          */
  319         if (s1 >= last)
  320                 d1 = s1 - last;
  321         else if (s1+maxback >= last)
  322                 d1 = (last - s1)*BACK_PENALTY;
  323         else {
  324                 r1_wrap = 1;
  325                 d1 = 0; /* shut up, gcc */
  326         }
  327 
  328         if (s2 >= last)
  329                 d2 = s2 - last;
  330         else if (s2+maxback >= last)
  331                 d2 = (last - s2)*BACK_PENALTY;
  332         else {
  333                 r2_wrap = 1;
  334                 d2 = 0;
  335         }
  336 
  337         /* Found required data */
  338         if (!r1_wrap && r2_wrap)
  339                 return rq1;
  340         else if (!r2_wrap && r1_wrap)
  341                 return rq2;
  342         else if (r1_wrap && r2_wrap) {
  343                 /* both behind the head */
  344                 if (s1 <= s2)
  345                         return rq1;
  346                 else
  347                         return rq2;
  348         }
  349 
  350         /* Both requests in front of the head */
  351         if (d1 < d2)
  352                 return rq1;
  353         else if (d2 < d1)
  354                 return rq2;
  355         else {
  356                 if (s1 >= s2)
  357                         return rq1;
  358                 else
  359                         return rq2;
  360         }
  361 }
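       /*
        * Illustrative numbers (hypothetical, not from the source): with
        * last == 1000, a request at sector 1100 costs d == 100, while one at
        * sector 960 lies 40 sectors behind the head and is charged
        * 40 * BACK_PENALTY == 80, so the backward request still wins here.
        * Only when a request falls more than MAXBACK sectors behind the head
        * is it treated as "wrapped" and deferred behind any forward request.
        */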
  362 
  363 /*
  364  * as_find_next_rq finds the next request after @prev in elevator order.
   365  * This, with as_choose_req, forms the basis for how the scheduler chooses
  366  * what request to process next. Anticipation works on top of this.
  367  */
  368 static struct request *
  369 as_find_next_rq(struct as_data *ad, struct request *last)
  370 {
  371         struct rb_node *rbnext = rb_next(&last->rb_node);
  372         struct rb_node *rbprev = rb_prev(&last->rb_node);
  373         struct request *next = NULL, *prev = NULL;
  374 
  375         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  376 
  377         if (rbprev)
  378                 prev = rb_entry_rq(rbprev);
  379 
  380         if (rbnext)
  381                 next = rb_entry_rq(rbnext);
  382         else {
  383                 const int data_dir = rq_is_sync(last);
  384 
  385                 rbnext = rb_first(&ad->sort_list[data_dir]);
  386                 if (rbnext && rbnext != &last->rb_node)
  387                         next = rb_entry_rq(rbnext);
  388         }
  389 
  390         return as_choose_req(ad, next, prev);
  391 }
  392 
  393 /*
  394  * anticipatory scheduling functions follow
  395  */
  396 
  397 /*
  398  * as_antic_expired tells us when we have anticipated too long.
  399  * The funny "absolute difference" math on the elapsed time is to handle
  400  * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
  401  */
  402 static int as_antic_expired(struct as_data *ad)
  403 {
  404         long delta_jif;
  405 
  406         delta_jif = jiffies - ad->antic_start;
  407         if (unlikely(delta_jif < 0))
  408                 delta_jif = -delta_jif;
  409         if (delta_jif < ad->antic_expire)
  410                 return 0;
  411 
  412         return 1;
  413 }
  414 
  415 /*
  416  * as_antic_waitnext starts anticipating that a nice request will soon be
  417  * submitted. See also as_antic_waitreq
  418  */
  419 static void as_antic_waitnext(struct as_data *ad)
  420 {
  421         unsigned long timeout;
  422 
  423         BUG_ON(ad->antic_status != ANTIC_OFF
  424                         && ad->antic_status != ANTIC_WAIT_REQ);
  425 
  426         timeout = ad->antic_start + ad->antic_expire;
  427 
  428         mod_timer(&ad->antic_timer, timeout);
  429 
  430         ad->antic_status = ANTIC_WAIT_NEXT;
  431 }
  432 
  433 /*
  434  * as_antic_waitreq starts anticipating. We don't start timing the anticipation
  435  * until the request that we're anticipating on has finished. This means we
   436  * are, hopefully, timing from when the candidate process wakes up.
  437  */
  438 static void as_antic_waitreq(struct as_data *ad)
  439 {
  440         BUG_ON(ad->antic_status == ANTIC_FINISHED);
  441         if (ad->antic_status == ANTIC_OFF) {
  442                 if (!ad->io_context || ad->ioc_finished)
  443                         as_antic_waitnext(ad);
  444                 else
  445                         ad->antic_status = ANTIC_WAIT_REQ;
  446         }
  447 }
  448 
  449 /*
  450  * This is called directly by the functions in this file to stop anticipation.
  451  * We kill the timer and schedule a call to the request_fn asap.
  452  */
  453 static void as_antic_stop(struct as_data *ad)
  454 {
  455         int status = ad->antic_status;
  456 
  457         if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
  458                 if (status == ANTIC_WAIT_NEXT)
  459                         del_timer(&ad->antic_timer);
  460                 ad->antic_status = ANTIC_FINISHED;
  461                 /* see as_work_handler */
  462                 kblockd_schedule_work(ad->q, &ad->antic_work);
  463         }
  464 }
  465 
  466 /*
  467  * as_antic_timeout is the timer function set by as_antic_waitnext.
  468  */
  469 static void as_antic_timeout(unsigned long data)
  470 {
  471         struct request_queue *q = (struct request_queue *)data;
  472         struct as_data *ad = q->elevator->elevator_data;
  473         unsigned long flags;
  474 
  475         spin_lock_irqsave(q->queue_lock, flags);
  476         if (ad->antic_status == ANTIC_WAIT_REQ
  477                         || ad->antic_status == ANTIC_WAIT_NEXT) {
  478                 struct as_io_context *aic;
  479                 spin_lock(&ad->io_context->lock);
  480                 aic = ad->io_context->aic;
  481 
  482                 ad->antic_status = ANTIC_FINISHED;
  483                 kblockd_schedule_work(q, &ad->antic_work);
  484 
  485                 if (aic->ttime_samples == 0) {
   486                         /* process anticipated on has exited or timed out */
  487                         ad->exit_prob = (7*ad->exit_prob + 256)/8;
  488                 }
  489                 if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
  490                         /* process not "saved" by a cooperating request */
  491                         ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
  492                 }
  493                 spin_unlock(&ad->io_context->lock);
  494         }
  495         spin_unlock_irqrestore(q->queue_lock, flags);
  496 }
  497 
  498 static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
  499                                 unsigned long ttime)
  500 {
  501         /* fixed point: 1.0 == 1<<8 */
  502         if (aic->ttime_samples == 0) {
  503                 ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
  504                 ad->new_ttime_mean = ad->new_ttime_total / 256;
  505 
  506                 ad->exit_prob = (7*ad->exit_prob)/8;
  507         }
  508         aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
  509         aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
  510         aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
  511 }
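       /*
        * A sketch of the arithmetic: ttime_samples and ttime_total are
        * exponentially decaying sums in fixed point (1.0 == 256), each new
        * sample weighted 1/8 against 7/8 of the history. With a steady
        * per-request thinktime t, ttime_samples converges towards 256 and
        * ttime_total towards 256 * t, so ttime_mean converges towards t
        * (the + 128 only rounds the division).
        */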
  512 
  513 static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
  514                                 sector_t sdist)
  515 {
  516         u64 total;
  517 
  518         if (aic->seek_samples == 0) {
  519                 ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
  520                 ad->new_seek_mean = ad->new_seek_total / 256;
  521         }
  522 
  523         /*
  524          * Don't allow the seek distance to get too large from the
  525          * odd fragment, pagein, etc
  526          */
  527         if (aic->seek_samples <= 60) /* second&third seek */
  528                 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
  529         else
  530                 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
  531 
  532         aic->seek_samples = (7*aic->seek_samples + 256) / 8;
  533         aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
  534         total = aic->seek_total + (aic->seek_samples/2);
  535         do_div(total, aic->seek_samples);
  536         aic->seek_mean = (sector_t)total;
  537 }
  538 
  539 /*
  540  * as_update_iohist keeps a decaying histogram of IO thinktimes, and
  541  * updates @aic->ttime_mean based on that. It is called when a new
  542  * request is queued.
  543  */
  544 static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
  545                                 struct request *rq)
  546 {
  547         int data_dir = rq_is_sync(rq);
  548         unsigned long thinktime = 0;
  549         sector_t seek_dist;
  550 
  551         if (aic == NULL)
  552                 return;
  553 
  554         if (data_dir == BLK_RW_SYNC) {
  555                 unsigned long in_flight = atomic_read(&aic->nr_queued)
  556                                         + atomic_read(&aic->nr_dispatched);
  557                 spin_lock(&aic->lock);
  558                 if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
  559                         test_bit(AS_TASK_IOSTARTED, &aic->state)) {
  560                         /* Calculate read -> read thinktime */
  561                         if (test_bit(AS_TASK_IORUNNING, &aic->state)
  562                                                         && in_flight == 0) {
  563                                 thinktime = jiffies - aic->last_end_request;
  564                                 thinktime = min(thinktime, MAX_THINKTIME-1);
  565                         }
  566                         as_update_thinktime(ad, aic, thinktime);
  567 
  568                         /* Calculate read -> read seek distance */
  569                         if (aic->last_request_pos < blk_rq_pos(rq))
  570                                 seek_dist = blk_rq_pos(rq) -
  571                                             aic->last_request_pos;
  572                         else
  573                                 seek_dist = aic->last_request_pos -
  574                                             blk_rq_pos(rq);
  575                         as_update_seekdist(ad, aic, seek_dist);
  576                 }
  577                 aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
  578                 set_bit(AS_TASK_IOSTARTED, &aic->state);
  579                 spin_unlock(&aic->lock);
  580         }
  581 }
  582 
  583 /*
  584  * as_close_req decides if one request is considered "close" to the
  585  * previous one issued.
  586  */
  587 static int as_close_req(struct as_data *ad, struct as_io_context *aic,
  588                         struct request *rq)
  589 {
  590         unsigned long delay;    /* jiffies */
  591         sector_t last = ad->last_sector[ad->batch_data_dir];
  592         sector_t next = blk_rq_pos(rq);
  593         sector_t delta; /* acceptable close offset (in sectors) */
  594         sector_t s;
  595 
  596         if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
  597                 delay = 0;
  598         else
  599                 delay = jiffies - ad->antic_start;
  600 
  601         if (delay == 0)
  602                 delta = 8192;
  603         else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire)
  604                 delta = 8192 << delay;
  605         else
  606                 return 1;
  607 
  608         if ((last <= next + (delta>>1)) && (next <= last + delta))
  609                 return 1;
  610 
  611         if (last < next)
  612                 s = next - last;
  613         else
  614                 s = last - next;
  615 
  616         if (aic->seek_samples == 0) {
  617                 /*
  618                  * Process has just started IO. Use past statistics to
  619                  * gauge success possibility
  620                  */
  621                 if (ad->new_seek_mean > s) {
  622                         /* this request is better than what we're expecting */
  623                         return 1;
  624                 }
  625 
  626         } else {
  627                 if (aic->seek_mean > s) {
  628                         /* this request is better than what we're expecting */
  629                         return 1;
  630                 }
  631         }
  632 
  633         return 0;
  634 }
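       /*
        * Concrete window (assuming 512-byte sectors): when the check runs
        * with delay == 0 the base delta is 8192 sectors (4MB), and the test
        * above accepts anything from delta/2 == 4096 sectors behind the last
        * issued position up to delta sectors ahead of it. The window doubles
        * for each jiffy of anticipation delay up to roughly 20ms, beyond
        * which every request is treated as close.
        */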
  635 
  636 /*
  637  * as_can_break_anticipation returns true if we have been anticipating this
  638  * request.
  639  *
  640  * It also returns true if the process against which we are anticipating
  641  * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
   642  * dispatch it ASAP, because we know that the application will not be submitting
  643  * any new reads.
  644  *
  645  * If the task which has submitted the request has exited, break anticipation.
  646  *
   647  * If this task has queued some other IO, do not enter anticipation.
  648  */
  649 static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
  650 {
  651         struct io_context *ioc;
  652         struct as_io_context *aic;
  653 
  654         ioc = ad->io_context;
  655         BUG_ON(!ioc);
  656         spin_lock(&ioc->lock);
  657 
  658         if (rq && ioc == RQ_IOC(rq)) {
  659                 /* request from same process */
  660                 spin_unlock(&ioc->lock);
  661                 return 1;
  662         }
  663 
  664         if (ad->ioc_finished && as_antic_expired(ad)) {
  665                 /*
  666                  * In this situation status should really be FINISHED,
  667                  * however the timer hasn't had the chance to run yet.
  668                  */
  669                 spin_unlock(&ioc->lock);
  670                 return 1;
  671         }
  672 
  673         aic = ioc->aic;
  674         if (!aic) {
  675                 spin_unlock(&ioc->lock);
  676                 return 0;
  677         }
  678 
  679         if (atomic_read(&aic->nr_queued) > 0) {
  680                 /* process has more requests queued */
  681                 spin_unlock(&ioc->lock);
  682                 return 1;
  683         }
  684 
  685         if (atomic_read(&aic->nr_dispatched) > 0) {
  686                 /* process has more requests dispatched */
  687                 spin_unlock(&ioc->lock);
  688                 return 1;
  689         }
  690 
  691         if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
  692                 /*
  693                  * Found a close request that is not one of ours.
  694                  *
  695                  * This makes close requests from another process update
   696  * our IO history. This is generally useful when there are
  697                  * two or more cooperating processes working in the same
  698                  * area.
  699                  */
  700                 if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
  701                         if (aic->ttime_samples == 0)
  702                                 ad->exit_prob = (7*ad->exit_prob + 256)/8;
  703 
  704                         ad->exit_no_coop = (7*ad->exit_no_coop)/8;
  705                 }
  706 
  707                 as_update_iohist(ad, aic, rq);
  708                 spin_unlock(&ioc->lock);
  709                 return 1;
  710         }
  711 
  712         if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
  713                 /* process anticipated on has exited */
  714                 if (aic->ttime_samples == 0)
  715                         ad->exit_prob = (7*ad->exit_prob + 256)/8;
  716 
  717                 if (ad->exit_no_coop > 128) {
  718                         spin_unlock(&ioc->lock);
  719                         return 1;
  720                 }
  721         }
  722 
  723         if (aic->ttime_samples == 0) {
  724                 if (ad->new_ttime_mean > ad->antic_expire) {
  725                         spin_unlock(&ioc->lock);
  726                         return 1;
  727                 }
  728                 if (ad->exit_prob * ad->exit_no_coop > 128*256) {
  729                         spin_unlock(&ioc->lock);
  730                         return 1;
  731                 }
  732         } else if (aic->ttime_mean > ad->antic_expire) {
  733                 /* the process thinks too much between requests */
  734                 spin_unlock(&ioc->lock);
  735                 return 1;
  736         }
  737         spin_unlock(&ioc->lock);
  738         return 0;
  739 }
  740 
  741 /*
  742  * as_can_anticipate indicates whether we should either run rq
  743  * or keep anticipating a better request.
  744  */
  745 static int as_can_anticipate(struct as_data *ad, struct request *rq)
  746 {
  747 #if 0 /* disable for now, we need to check tag level as well */
  748         /*
  749          * SSD device without seek penalty, disable idling
  750          */
   751         if (blk_queue_nonrot(ad->q))
  752                 return 0;
  753 #endif
  754 
  755         if (!ad->io_context)
  756                 /*
  757                  * Last request submitted was a write
  758                  */
  759                 return 0;
  760 
  761         if (ad->antic_status == ANTIC_FINISHED)
  762                 /*
  763                  * Don't restart if we have just finished. Run the next request
  764                  */
  765                 return 0;
  766 
  767         if (as_can_break_anticipation(ad, rq))
  768                 /*
  769                  * This request is a good candidate. Don't keep anticipating,
  770                  * run it.
  771                  */
  772                 return 0;
  773 
  774         /*
  775          * OK from here, we haven't finished, and don't have a decent request!
  776          * Status is either ANTIC_OFF so start waiting,
  777          * ANTIC_WAIT_REQ so continue waiting for request to finish
  778          * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
  779          */
  780 
  781         return 1;
  782 }
  783 
  784 /*
  785  * as_update_rq must be called whenever a request (rq) is added to
  786  * the sort_list. This function keeps caches up to date, and checks if the
  787  * request might be one we are "anticipating"
  788  */
  789 static void as_update_rq(struct as_data *ad, struct request *rq)
  790 {
  791         const int data_dir = rq_is_sync(rq);
  792 
  793         /* keep the next_rq cache up to date */
  794         ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);
  795 
  796         /*
  797          * have we been anticipating this request?
  798          * or does it come from the same process as the one we are anticipating
  799          * for?
  800          */
  801         if (ad->antic_status == ANTIC_WAIT_REQ
  802                         || ad->antic_status == ANTIC_WAIT_NEXT) {
  803                 if (as_can_break_anticipation(ad, rq))
  804                         as_antic_stop(ad);
  805         }
  806 }
  807 
  808 /*
  809  * Gathers timings and resizes the write batch automatically
  810  */
  811 static void update_write_batch(struct as_data *ad)
  812 {
  813         unsigned long batch = ad->batch_expire[BLK_RW_ASYNC];
  814         long write_time;
  815 
  816         write_time = (jiffies - ad->current_batch_expires) + batch;
  817         if (write_time < 0)
  818                 write_time = 0;
  819 
  820         if (write_time > batch && !ad->write_batch_idled) {
  821                 if (write_time > batch * 3)
  822                         ad->write_batch_count /= 2;
  823                 else
  824                         ad->write_batch_count--;
  825         } else if (write_time < batch && ad->current_write_count == 0) {
  826                 if (batch > write_time * 3)
  827                         ad->write_batch_count *= 2;
  828                 else
  829                         ad->write_batch_count++;
  830         }
  831 
  832         if (ad->write_batch_count < 1)
  833                 ad->write_batch_count = 1;
  834 }
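       /*
        * Rough example of the feedback, with the defaults and an assumed
        * HZ == 1000: write_time reconstructs how long the finished write
        * batch really ran. Against the 125ms target, a batch that took more
        * than 375ms halves write_batch_count, one that merely overran is
        * decremented, and a batch that used up its request quota before the
        * target elapsed grows the count again, never below one request.
        */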
  835 
  836 /*
  837  * as_completed_request is to be called when a request has completed and
  838  * returned something to the requesting process, be it an error or data.
  839  */
  840 static void as_completed_request(struct request_queue *q, struct request *rq)
  841 {
  842         struct as_data *ad = q->elevator->elevator_data;
  843 
  844         WARN_ON(!list_empty(&rq->queuelist));
  845 
  846         if (RQ_STATE(rq) != AS_RQ_REMOVED) {
  847                 WARN(1, "rq->state %d\n", RQ_STATE(rq));
  848                 goto out;
  849         }
  850 
  851         if (ad->changed_batch && ad->nr_dispatched == 1) {
  852                 ad->current_batch_expires = jiffies +
  853                                         ad->batch_expire[ad->batch_data_dir];
  854                 kblockd_schedule_work(q, &ad->antic_work);
  855                 ad->changed_batch = 0;
  856 
  857                 if (ad->batch_data_dir == BLK_RW_SYNC)
  858                         ad->new_batch = 1;
  859         }
  860         WARN_ON(ad->nr_dispatched == 0);
  861         ad->nr_dispatched--;
  862 
  863         /*
  864          * Start counting the batch from when a request of that direction is
  865          * actually serviced. This should help devices with big TCQ windows
  866          * and writeback caches
  867          */
  868         if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
  869                 update_write_batch(ad);
  870                 ad->current_batch_expires = jiffies +
  871                                 ad->batch_expire[BLK_RW_SYNC];
  872                 ad->new_batch = 0;
  873         }
  874 
  875         if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
  876                 ad->antic_start = jiffies;
  877                 ad->ioc_finished = 1;
  878                 if (ad->antic_status == ANTIC_WAIT_REQ) {
  879                         /*
  880                          * We were waiting on this request, now anticipate
  881                          * the next one
  882                          */
  883                         as_antic_waitnext(ad);
  884                 }
  885         }
  886 
  887         as_put_io_context(rq);
  888 out:
  889         RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
  890 }
  891 
  892 /*
  893  * as_remove_queued_request removes a request from the pre dispatch queue
  894  * without updating refcounts. It is expected the caller will drop the
   895  * reference unless it replaces the request at some part of the elevator
  896  * (ie. the dispatch queue)
  897  */
  898 static void as_remove_queued_request(struct request_queue *q,
  899                                      struct request *rq)
  900 {
  901         const int data_dir = rq_is_sync(rq);
  902         struct as_data *ad = q->elevator->elevator_data;
  903         struct io_context *ioc;
  904 
  905         WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
  906 
  907         ioc = RQ_IOC(rq);
  908         if (ioc && ioc->aic) {
  909                 BUG_ON(!atomic_read(&ioc->aic->nr_queued));
  910                 atomic_dec(&ioc->aic->nr_queued);
  911         }
  912 
  913         /*
  914          * Update the "next_rq" cache if we are about to remove its
  915          * entry
  916          */
  917         if (ad->next_rq[data_dir] == rq)
  918                 ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
  919 
  920         rq_fifo_clear(rq);
  921         as_del_rq_rb(ad, rq);
  922 }
  923 
  924 /*
  925  * as_fifo_expired returns 0 if there are no expired requests on the fifo,
  926  * 1 otherwise.  It is ratelimited so that we only perform the check once per
  927  * `fifo_expire' interval.  Otherwise a large number of expired requests
  928  * would create a hopeless seekstorm.
  929  *
  930  * See as_antic_expired comment.
  931  */
  932 static int as_fifo_expired(struct as_data *ad, int adir)
  933 {
  934         struct request *rq;
  935         long delta_jif;
  936 
  937         delta_jif = jiffies - ad->last_check_fifo[adir];
  938         if (unlikely(delta_jif < 0))
  939                 delta_jif = -delta_jif;
  940         if (delta_jif < ad->fifo_expire[adir])
  941                 return 0;
  942 
  943         ad->last_check_fifo[adir] = jiffies;
  944 
  945         if (list_empty(&ad->fifo_list[adir]))
  946                 return 0;
  947 
  948         rq = rq_entry_fifo(ad->fifo_list[adir].next);
  949 
  950         return time_after(jiffies, rq_fifo_time(rq));
  951 }
  952 
  953 /*
  954  * as_batch_expired returns true if the current batch has expired. A batch
  955  * is a set of reads or a set of writes.
  956  */
  957 static inline int as_batch_expired(struct as_data *ad)
  958 {
  959         if (ad->changed_batch || ad->new_batch)
  960                 return 0;
  961 
  962         if (ad->batch_data_dir == BLK_RW_SYNC)
  963                 /* TODO! add a check so a complete fifo gets written? */
  964                 return time_after(jiffies, ad->current_batch_expires);
  965 
  966         return time_after(jiffies, ad->current_batch_expires)
  967                 || ad->current_write_count == 0;
  968 }
  969 
  970 /*
  971  * move an entry to dispatch queue
  972  */
  973 static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
  974 {
  975         const int data_dir = rq_is_sync(rq);
  976 
  977         BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
  978 
  979         as_antic_stop(ad);
  980         ad->antic_status = ANTIC_OFF;
  981 
  982         /*
  983          * This has to be set in order to be correctly updated by
  984          * as_find_next_rq
  985          */
  986         ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
  987 
  988         if (data_dir == BLK_RW_SYNC) {
  989                 struct io_context *ioc = RQ_IOC(rq);
  990                 /* In case we have to anticipate after this */
  991                 copy_io_context(&ad->io_context, &ioc);
  992         } else {
  993                 if (ad->io_context) {
  994                         put_io_context(ad->io_context);
  995                         ad->io_context = NULL;
  996                 }
  997 
  998                 if (ad->current_write_count != 0)
  999                         ad->current_write_count--;
 1000         }
 1001         ad->ioc_finished = 0;
 1002 
 1003         ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
 1004 
 1005         /*
 1006          * take it off the sort and fifo list, add to dispatch queue
 1007          */
 1008         as_remove_queued_request(ad->q, rq);
 1009         WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
 1010 
 1011         elv_dispatch_sort(ad->q, rq);
 1012 
 1013         RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
 1014         if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
 1015                 atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
 1016         ad->nr_dispatched++;
 1017 }
 1018 
 1019 /*
 1020  * as_dispatch_request selects the best request according to
 1021  * read/write expire, batch expire, etc, and moves it to the dispatch
 1022  * queue. Returns 1 if a request was found, 0 otherwise.
 1023  */
 1024 static int as_dispatch_request(struct request_queue *q, int force)
 1025 {
 1026         struct as_data *ad = q->elevator->elevator_data;
 1027         const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]);
 1028         const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]);
 1029         struct request *rq;
 1030 
 1031         if (unlikely(force)) {
 1032                 /*
 1033                  * Forced dispatch, accounting is useless.  Reset
 1034                  * accounting states and dump fifo_lists.  Note that
 1035                  * batch_data_dir is reset to BLK_RW_SYNC to avoid
 1036                  * screwing write batch accounting as write batch
 1037                  * accounting occurs on W->R transition.
 1038                  */
 1039                 int dispatched = 0;
 1040 
 1041                 ad->batch_data_dir = BLK_RW_SYNC;
 1042                 ad->changed_batch = 0;
 1043                 ad->new_batch = 0;
 1044 
 1045                 while (ad->next_rq[BLK_RW_SYNC]) {
 1046                         as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]);
 1047                         dispatched++;
 1048                 }
 1049                 ad->last_check_fifo[BLK_RW_SYNC] = jiffies;
 1050 
 1051                 while (ad->next_rq[BLK_RW_ASYNC]) {
 1052                         as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]);
 1053                         dispatched++;
 1054                 }
 1055                 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
 1056 
 1057                 return dispatched;
 1058         }
 1059 
 1060         /* Signal that the write batch was uncontended, so we can't time it */
 1061         if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) {
 1062                 if (ad->current_write_count == 0 || !writes)
 1063                         ad->write_batch_idled = 1;
 1064         }
 1065 
 1066         if (!(reads || writes)
 1067                 || ad->antic_status == ANTIC_WAIT_REQ
 1068                 || ad->antic_status == ANTIC_WAIT_NEXT
 1069                 || ad->changed_batch)
 1070                 return 0;
 1071 
 1072         if (!(reads && writes && as_batch_expired(ad))) {
 1073                 /*
 1074                  * batch is still running or no reads or no writes
 1075                  */
 1076                 rq = ad->next_rq[ad->batch_data_dir];
 1077 
 1078                 if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) {
 1079                         if (as_fifo_expired(ad, BLK_RW_SYNC))
 1080                                 goto fifo_expired;
 1081 
 1082                         if (as_can_anticipate(ad, rq)) {
 1083                                 as_antic_waitreq(ad);
 1084                                 return 0;
 1085                         }
 1086                 }
 1087 
 1088                 if (rq) {
 1089                         /* we have a "next request" */
 1090                         if (reads && !writes)
 1091                                 ad->current_batch_expires =
 1092                                         jiffies + ad->batch_expire[BLK_RW_SYNC];
 1093                         goto dispatch_request;
 1094                 }
 1095         }
 1096 
 1097         /*
 1098          * at this point we are not running a batch. select the appropriate
 1099          * data direction (read / write)
 1100          */
 1101 
 1102         if (reads) {
 1103                 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC]));
 1104 
 1105                 if (writes && ad->batch_data_dir == BLK_RW_SYNC)
 1106                         /*
 1107                          * Last batch was a read, switch to writes
 1108                          */
 1109                         goto dispatch_writes;
 1110 
 1111                 if (ad->batch_data_dir == BLK_RW_ASYNC) {
 1112                         WARN_ON(ad->new_batch);
 1113                         ad->changed_batch = 1;
 1114                 }
 1115                 ad->batch_data_dir = BLK_RW_SYNC;
 1116                 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next);
 1117                 ad->last_check_fifo[ad->batch_data_dir] = jiffies;
 1118                 goto dispatch_request;
 1119         }
 1120 
 1121         /*
  1122          * no reads are pending: dispatch writes, if there are any
 1123          */
 1124 
 1125         if (writes) {
 1126 dispatch_writes:
 1127                 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC]));
 1128 
 1129                 if (ad->batch_data_dir == BLK_RW_SYNC) {
 1130                         ad->changed_batch = 1;
 1131 
 1132                         /*
 1133                          * new_batch might be 1 when the queue runs out of
 1134                          * reads. A subsequent submission of a write might
 1135                          * cause a change of batch before the read is finished.
 1136                          */
 1137                         ad->new_batch = 0;
 1138                 }
 1139                 ad->batch_data_dir = BLK_RW_ASYNC;
 1140                 ad->current_write_count = ad->write_batch_count;
 1141                 ad->write_batch_idled = 0;
 1142                 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next);
 1143                 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
 1144                 goto dispatch_request;
 1145         }
 1146 
 1147         BUG();
 1148         return 0;
 1149 
 1150 dispatch_request:
 1151         /*
 1152          * If a request has expired, service it.
 1153          */
 1154 
 1155         if (as_fifo_expired(ad, ad->batch_data_dir)) {
 1156 fifo_expired:
 1157                 rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
 1158         }
 1159 
 1160         if (ad->changed_batch) {
 1161                 WARN_ON(ad->new_batch);
 1162 
 1163                 if (ad->nr_dispatched)
 1164                         return 0;
 1165 
 1166                 if (ad->batch_data_dir == BLK_RW_ASYNC)
 1167                         ad->current_batch_expires = jiffies +
 1168                                         ad->batch_expire[BLK_RW_ASYNC];
 1169                 else
 1170                         ad->new_batch = 1;
 1171 
 1172                 ad->changed_batch = 0;
 1173         }
 1174 
 1175         /*
 1176          * rq is the selected appropriate request.
 1177          */
 1178         as_move_to_dispatch(ad, rq);
 1179 
 1180         return 1;
 1181 }
 1182 
 1183 /*
 1184  * add rq to rbtree and fifo
 1185  */
 1186 static void as_add_request(struct request_queue *q, struct request *rq)
 1187 {
 1188         struct as_data *ad = q->elevator->elevator_data;
 1189         int data_dir;
 1190 
 1191         RQ_SET_STATE(rq, AS_RQ_NEW);
 1192 
 1193         data_dir = rq_is_sync(rq);
 1194 
 1195         rq->elevator_private = as_get_io_context(q->node);
 1196 
 1197         if (RQ_IOC(rq)) {
 1198                 as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
 1199                 atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
 1200         }
 1201 
 1202         as_add_rq_rb(ad, rq);
 1203 
 1204         /*
 1205          * set expire time and add to fifo list
 1206          */
 1207         rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
 1208         list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
 1209 
 1210         as_update_rq(ad, rq); /* keep state machine up to date */
 1211         RQ_SET_STATE(rq, AS_RQ_QUEUED);
 1212 }
 1213 
 1214 static void as_activate_request(struct request_queue *q, struct request *rq)
 1215 {
 1216         WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
 1217         RQ_SET_STATE(rq, AS_RQ_REMOVED);
 1218         if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
 1219                 atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
 1220 }
 1221 
 1222 static void as_deactivate_request(struct request_queue *q, struct request *rq)
 1223 {
 1224         WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
 1225         RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
 1226         if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
 1227                 atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
 1228 }
 1229 
 1230 /*
 1231  * as_queue_empty tells us if there are requests left in the device. It may
 1232  * not be the case that a driver can get the next request even if the queue
 1233  * is not empty - it is used in the block layer to check for plugging and
 1234  * merging opportunities
 1235  */
 1236 static int as_queue_empty(struct request_queue *q)
 1237 {
 1238         struct as_data *ad = q->elevator->elevator_data;
 1239 
 1240         return list_empty(&ad->fifo_list[BLK_RW_ASYNC])
 1241                 && list_empty(&ad->fifo_list[BLK_RW_SYNC]);
 1242 }
 1243 
 1244 static int
 1245 as_merge(struct request_queue *q, struct request **req, struct bio *bio)
 1246 {
 1247         struct as_data *ad = q->elevator->elevator_data;
 1248         sector_t rb_key = bio->bi_sector + bio_sectors(bio);
 1249         struct request *__rq;
 1250 
 1251         /*
 1252          * check for front merge
 1253          */
 1254         __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
 1255         if (__rq && elv_rq_merge_ok(__rq, bio)) {
 1256                 *req = __rq;
 1257                 return ELEVATOR_FRONT_MERGE;
 1258         }
 1259 
 1260         return ELEVATOR_NO_MERGE;
 1261 }
 1262 
 1263 static void as_merged_request(struct request_queue *q, struct request *req,
 1264                               int type)
 1265 {
 1266         struct as_data *ad = q->elevator->elevator_data;
 1267 
 1268         /*
 1269          * if the merge was a front merge, we need to reposition request
 1270          */
 1271         if (type == ELEVATOR_FRONT_MERGE) {
 1272                 as_del_rq_rb(ad, req);
 1273                 as_add_rq_rb(ad, req);
 1274                 /*
 1275                  * Note! At this stage of this and the next function, our next
 1276                  * request may not be optimal - eg the request may have "grown"
 1277                  * behind the disk head. We currently don't bother adjusting.
 1278                  */
 1279         }
 1280 }
 1281 
 1282 static void as_merged_requests(struct request_queue *q, struct request *req,
 1283                                 struct request *next)
 1284 {
 1285         /*
  1286          * if next expires before req, assign its expire time to req
 1287          * and move into next position (next will be deleted) in fifo
 1288          */
 1289         if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 1290                 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
 1291                         list_move(&req->queuelist, &next->queuelist);
 1292                         rq_set_fifo_time(req, rq_fifo_time(next));
 1293                 }
 1294         }
 1295 
 1296         /*
 1297          * kill knowledge of next, this one is a goner
 1298          */
 1299         as_remove_queued_request(q, next);
 1300         as_put_io_context(next);
 1301 
 1302         RQ_SET_STATE(next, AS_RQ_MERGED);
 1303 }
 1304 
 1305 /*
 1306  * This is executed in a "deferred" process context, by kblockd. It calls the
 1307  * driver's request_fn so the driver can submit that request.
 1308  *
 1309  * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 1310  * state before calling, and don't rely on any state over calls.
 1311  *
 1312  * FIXME! dispatch queue is not a queue at all!
 1313  */
 1314 static void as_work_handler(struct work_struct *work)
 1315 {
 1316         struct as_data *ad = container_of(work, struct as_data, antic_work);
 1317 
 1318         blk_run_queue(ad->q);
 1319 }
 1320 
 1321 static int as_may_queue(struct request_queue *q, int rw)
 1322 {
 1323         int ret = ELV_MQUEUE_MAY;
 1324         struct as_data *ad = q->elevator->elevator_data;
 1325         struct io_context *ioc;
 1326         if (ad->antic_status == ANTIC_WAIT_REQ ||
 1327                         ad->antic_status == ANTIC_WAIT_NEXT) {
 1328                 ioc = as_get_io_context(q->node);
 1329                 if (ad->io_context == ioc)
 1330                         ret = ELV_MQUEUE_MUST;
 1331                 put_io_context(ioc);
 1332         }
 1333 
 1334         return ret;
 1335 }
 1336 
 1337 static void as_exit_queue(struct elevator_queue *e)
 1338 {
 1339         struct as_data *ad = e->elevator_data;
 1340 
 1341         del_timer_sync(&ad->antic_timer);
 1342         cancel_work_sync(&ad->antic_work);
 1343 
 1344         BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC]));
 1345         BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC]));
 1346 
 1347         put_io_context(ad->io_context);
 1348         kfree(ad);
 1349 }
 1350 
 1351 /*
 1352  * initialize elevator private data (as_data).
 1353  */
 1354 static void *as_init_queue(struct request_queue *q)
 1355 {
 1356         struct as_data *ad;
 1357 
 1358         ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
 1359         if (!ad)
 1360                 return NULL;
 1361 
 1362         ad->q = q; /* Identify what queue the data belongs to */
 1363 
 1364         /* anticipatory scheduling helpers */
 1365         ad->antic_timer.function = as_antic_timeout;
 1366         ad->antic_timer.data = (unsigned long)q;
 1367         init_timer(&ad->antic_timer);
 1368         INIT_WORK(&ad->antic_work, as_work_handler);
 1369 
 1370         INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]);
 1371         INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]);
 1372         ad->sort_list[BLK_RW_SYNC] = RB_ROOT;
 1373         ad->sort_list[BLK_RW_ASYNC] = RB_ROOT;
 1374         ad->fifo_expire[BLK_RW_SYNC] = default_read_expire;
 1375         ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire;
 1376         ad->antic_expire = default_antic_expire;
 1377         ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire;
 1378         ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire;
 1379 
 1380         ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC];
 1381         ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10;
 1382         if (ad->write_batch_count < 2)
 1383                 ad->write_batch_count = 2;
 1384 
 1385         return ad;
 1386 }
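       /*
        * With these defaults and an assumed HZ == 1000, the fifo expiries are
        * 125ms for reads and 250ms for writes, the batch targets 500ms and
        * 125ms respectively, and the initial write_batch_count comes out at
        * 125 / 10 == 12 requests (the clamp to 2 only matters for low-HZ
        * configurations where the division would yield 0 or 1).
        */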
 1387 
 1388 /*
 1389  * sysfs parts below
 1390  */
 1391 
 1392 static ssize_t
 1393 as_var_show(unsigned int var, char *page)
 1394 {
 1395         return sprintf(page, "%d\n", var);
 1396 }
 1397 
 1398 static ssize_t
 1399 as_var_store(unsigned long *var, const char *page, size_t count)
 1400 {
 1401         char *p = (char *) page;
 1402 
 1403         *var = simple_strtoul(p, &p, 10);
 1404         return count;
 1405 }
 1406 
 1407 static ssize_t est_time_show(struct elevator_queue *e, char *page)
 1408 {
 1409         struct as_data *ad = e->elevator_data;
 1410         int pos = 0;
 1411 
 1412         pos += sprintf(page+pos, "%lu %% exit probability\n",
 1413                                 100*ad->exit_prob/256);
 1414         pos += sprintf(page+pos, "%lu %% probability of exiting without a "
 1415                                 "cooperating process submitting IO\n",
 1416                                 100*ad->exit_no_coop/256);
 1417         pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
 1418         pos += sprintf(page+pos, "%llu sectors new seek distance\n",
 1419                                 (unsigned long long)ad->new_seek_mean);
 1420 
 1421         return pos;
 1422 }
 1423 
 1424 #define SHOW_FUNCTION(__FUNC, __VAR)                            \
 1425 static ssize_t __FUNC(struct elevator_queue *e, char *page)     \
 1426 {                                                               \
 1427         struct as_data *ad = e->elevator_data;                  \
 1428         return as_var_show(jiffies_to_msecs((__VAR)), (page));  \
 1429 }
 1430 SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]);
 1431 SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]);
 1432 SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
 1433 SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[BLK_RW_SYNC]);
 1434 SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]);
 1435 #undef SHOW_FUNCTION
 1436 
 1437 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                         \
 1438 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
 1439 {                                                                       \
 1440         struct as_data *ad = e->elevator_data;                          \
 1441         int ret = as_var_store(__PTR, (page), count);                   \
 1442         if (*(__PTR) < (MIN))                                           \
 1443                 *(__PTR) = (MIN);                                       \
 1444         else if (*(__PTR) > (MAX))                                      \
 1445                 *(__PTR) = (MAX);                                       \
 1446         *(__PTR) = msecs_to_jiffies(*(__PTR));                          \
 1447         return ret;                                                     \
 1448 }
 1449 STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX);
 1450 STORE_FUNCTION(as_write_expire_store,
 1451                         &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX);
 1452 STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
 1453 STORE_FUNCTION(as_read_batch_expire_store,
 1454                         &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX);
 1455 STORE_FUNCTION(as_write_batch_expire_store,
 1456                         &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX);
 1457 #undef STORE_FUNCTION
 1458 
 1459 #define AS_ATTR(name) \
 1460         __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
 1461 
 1462 static struct elv_fs_entry as_attrs[] = {
 1463         __ATTR_RO(est_time),
 1464         AS_ATTR(read_expire),
 1465         AS_ATTR(write_expire),
 1466         AS_ATTR(antic_expire),
 1467         AS_ATTR(read_batch_expire),
 1468         AS_ATTR(write_batch_expire),
 1469         __ATTR_NULL
 1470 };
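       /*
        * Usage sketch (the path assumes a disk named sda; substitute the real
        * device): while this scheduler is active, the attributes above show
        * up as millisecond values under /sys/block/sda/queue/iosched/, e.g.
        *
        *     cat /sys/block/sda/queue/iosched/antic_expire
        *     echo 10 > /sys/block/sda/queue/iosched/antic_expire
        *
        * the store path converts milliseconds to jiffies with
        * msecs_to_jiffies(), as STORE_FUNCTION above shows.
        */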
 1471 
 1472 static struct elevator_type iosched_as = {
 1473         .ops = {
 1474                 .elevator_merge_fn =            as_merge,
 1475                 .elevator_merged_fn =           as_merged_request,
 1476                 .elevator_merge_req_fn =        as_merged_requests,
 1477                 .elevator_dispatch_fn =         as_dispatch_request,
 1478                 .elevator_add_req_fn =          as_add_request,
 1479                 .elevator_activate_req_fn =     as_activate_request,
 1480                 .elevator_deactivate_req_fn =   as_deactivate_request,
 1481                 .elevator_queue_empty_fn =      as_queue_empty,
 1482                 .elevator_completed_req_fn =    as_completed_request,
 1483                 .elevator_former_req_fn =       elv_rb_former_request,
 1484                 .elevator_latter_req_fn =       elv_rb_latter_request,
 1485                 .elevator_may_queue_fn =        as_may_queue,
 1486                 .elevator_init_fn =             as_init_queue,
 1487                 .elevator_exit_fn =             as_exit_queue,
 1488                 .trim =                         as_trim,
 1489         },
 1490 
 1491         .elevator_attrs = as_attrs,
 1492         .elevator_name = "anticipatory",
 1493         .elevator_owner = THIS_MODULE,
 1494 };
 1495 
 1496 static int __init as_init(void)
 1497 {
 1498         elv_register(&iosched_as);
 1499 
 1500         return 0;
 1501 }
 1502 
 1503 static void __exit as_exit(void)
 1504 {
 1505         DECLARE_COMPLETION_ONSTACK(all_gone);
 1506         elv_unregister(&iosched_as);
 1507         ioc_gone = &all_gone;
 1508         /* ioc_gone's update must be visible before reading ioc_count */
 1509         smp_wmb();
 1510         if (elv_ioc_count_read(as_ioc_count))
 1511                 wait_for_completion(&all_gone);
 1512         synchronize_rcu();
 1513 }
 1514 
 1515 module_init(as_init);
 1516 module_exit(as_exit);
 1517 
 1518 MODULE_AUTHOR("Nick Piggin");
 1519 MODULE_LICENSE("GPL");
 1520 MODULE_DESCRIPTION("anticipatory IO scheduler");
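       /*
        * Selection sketch (assuming the scheduler is compiled in or loaded):
        * the elevator can be chosen per device at runtime, e.g.
        *
        *     echo anticipatory > /sys/block/sda/queue/scheduler
        *
        * or system-wide with the elevator=anticipatory boot parameter; the
        * name matches .elevator_name in the elevator_type above.
        */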
