FreeBSD/Linux Kernel Cross Reference
sys/block/elevator.c


    1 /*
    2  *  Block device elevator/IO-scheduler.
    3  *
    4  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
    5  *
    6  * 30042000 Jens Axboe <axboe@kernel.dk> :
    7  *
    8  * Split the elevator a bit so that it is possible to choose a different
    9  * one or even write a new "plug-in". There are three pieces:
   10  * - elevator_fn, inserts a new request in the queue list
   11  * - elevator_merge_fn, decides whether a new buffer can be merged with
   12  *   an existing request
   13  * - elevator_dequeue_fn, called when a request is taken off the active list
   14  *
   15  * 20082000 Dave Jones <davej@suse.de> :
   16  * Removed tests for max-bomb-segments, which was breaking elvtune
   17  *  when run without -bN
   18  *
   19  * Jens:
   20  * - Rework again to work with bio instead of buffer_heads
   21  * - lose bi_dev comparisons; partition handling is done right now
   22  * - completely modularize elevator setup and teardown
   23  *
   24  */
   25 #include <linux/kernel.h>
   26 #include <linux/fs.h>
   27 #include <linux/blkdev.h>
   28 #include <linux/elevator.h>
   29 #include <linux/bio.h>
   30 #include <linux/module.h>
   31 #include <linux/slab.h>
   32 #include <linux/init.h>
   33 #include <linux/compiler.h>
   34 #include <linux/blktrace_api.h>
   35 #include <linux/hash.h>
   36 #include <linux/uaccess.h>
   37 
   38 #include <trace/events/block.h>
   39 
   40 #include "blk.h"
   41 #include "blk-cgroup.h"
   42 
   43 static DEFINE_SPINLOCK(elv_list_lock);
   44 static LIST_HEAD(elv_list);
   45 
   46 /*
   47  * Merge hash: requests hashed by end sector for back-merge lookups.
   48  */
   49 static const int elv_hash_shift = 6;
   50 #define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
   51 #define ELV_HASH_FN(sec)        \
   52                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
   53 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
   54 #define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
   55 
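The hash key is the request's end sector (start plus length), so a lookup keyed on a bio's start sector finds exactly the requests that the bio could be appended to. A small worked example (sector numbers hypothetical):

        /*
         * A request starting at sector 1024 and spanning 8 sectors has
         * rq_hash_key(rq) == 1032.  A new bio with bi_sector == 1032
         * hashes to the same bucket, so elv_merge() below can find the
         * request and return ELEVATOR_BACK_MERGE.
         */
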
   56 /*
   57  * Query the io scheduler to see if a bio issued by the current
   58  * process may be merged with rq.
   59  */
   60 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
   61 {
   62         struct request_queue *q = rq->q;
   63         struct elevator_queue *e = q->elevator;
   64 
   65         if (e->type->ops.elevator_allow_merge_fn)
   66                 return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
   67 
   68         return 1;
   69 }
   70 
   71 /*
   72  * can we safely merge with this request?
   73  */
   74 bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
   75 {
   76         if (!blk_rq_merge_ok(rq, bio))
   77                 return false;
   78 
   79         if (!elv_iosched_allow_merge(rq, bio))
   80                 return false;
   81 
   82         return true;
   83 }
   84 EXPORT_SYMBOL(elv_rq_merge_ok);
   85 
   86 static struct elevator_type *elevator_find(const char *name)
   87 {
   88         struct elevator_type *e;
   89 
   90         list_for_each_entry(e, &elv_list, list) {
   91                 if (!strcmp(e->elevator_name, name))
   92                         return e;
   93         }
   94 
   95         return NULL;
   96 }
   97 
   98 static void elevator_put(struct elevator_type *e)
   99 {
  100         module_put(e->elevator_owner);
  101 }
  102 
  103 static struct elevator_type *elevator_get(const char *name)
  104 {
  105         struct elevator_type *e;
  106 
  107         spin_lock(&elv_list_lock);
  108 
  109         e = elevator_find(name);
  110         if (!e) {
  111                 spin_unlock(&elv_list_lock);
  112                 request_module("%s-iosched", name);
  113                 spin_lock(&elv_list_lock);
  114                 e = elevator_find(name);
  115         }
  116 
  117         if (e && !try_module_get(e->elevator_owner))
  118                 e = NULL;
  119 
  120         spin_unlock(&elv_list_lock);
  121 
  122         return e;
  123 }
  124 
  125 static char chosen_elevator[ELV_NAME_MAX];
  126 
  127 static int __init elevator_setup(char *str)
  128 {
  129         /*
  130          * Be backwards-compatible with previous kernels, so users
  131          * won't get the wrong elevator.
  132          */
  133         strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
  134         return 1;
  135 }
  136 
  137 __setup("elevator=", elevator_setup);
  138 
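chosen_elevator is filled from the kernel command line; for example, booting with

        elevator=deadline

makes deadline the default io scheduler for queues that are initialized without an explicit name (see elevator_init() below).
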
  139 static struct kobj_type elv_ktype;
  140 
  141 static struct elevator_queue *elevator_alloc(struct request_queue *q,
  142                                   struct elevator_type *e)
  143 {
  144         struct elevator_queue *eq;
  145         int i;
  146 
  147         eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
  148         if (unlikely(!eq))
  149                 goto err;
  150 
  151         eq->type = e;
  152         kobject_init(&eq->kobj, &elv_ktype);
  153         mutex_init(&eq->sysfs_lock);
  154 
  155         eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
  156                                         GFP_KERNEL, q->node);
  157         if (!eq->hash)
  158                 goto err;
  159 
  160         for (i = 0; i < ELV_HASH_ENTRIES; i++)
  161                 INIT_HLIST_HEAD(&eq->hash[i]);
  162 
  163         return eq;
  164 err:
  165         kfree(eq);
  166         elevator_put(e);
  167         return NULL;
  168 }
  169 
  170 static void elevator_release(struct kobject *kobj)
  171 {
  172         struct elevator_queue *e;
  173 
  174         e = container_of(kobj, struct elevator_queue, kobj);
  175         elevator_put(e->type);
  176         kfree(e->hash);
  177         kfree(e);
  178 }
  179 
  180 int elevator_init(struct request_queue *q, char *name)
  181 {
  182         struct elevator_type *e = NULL;
  183         int err;
  184 
  185         if (unlikely(q->elevator))
  186                 return 0;
  187 
  188         INIT_LIST_HEAD(&q->queue_head);
  189         q->last_merge = NULL;
  190         q->end_sector = 0;
  191         q->boundary_rq = NULL;
  192 
  193         if (name) {
  194                 e = elevator_get(name);
  195                 if (!e)
  196                         return -EINVAL;
  197         }
  198 
  199         if (!e && *chosen_elevator) {
  200                 e = elevator_get(chosen_elevator);
  201                 if (!e)
  202                         printk(KERN_ERR "I/O scheduler %s not found\n",
  203                                                         chosen_elevator);
  204         }
  205 
  206         if (!e) {
  207                 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
  208                 if (!e) {
  209                         printk(KERN_ERR
  210                                 "Default I/O scheduler not found. " \
  211                                 "Using noop.\n");
  212                         e = elevator_get("noop");
  213                 }
  214         }
  215 
  216         q->elevator = elevator_alloc(q, e);
  217         if (!q->elevator)
  218                 return -ENOMEM;
  219 
  220         err = e->ops.elevator_init_fn(q);
  221         if (err) {
  222                 kobject_put(&q->elevator->kobj);
  223                 return err;
  224         }
  225 
  226         return 0;
  227 }
  228 EXPORT_SYMBOL(elevator_init);
  229 
  230 void elevator_exit(struct elevator_queue *e)
  231 {
  232         mutex_lock(&e->sysfs_lock);
  233         if (e->type->ops.elevator_exit_fn)
  234                 e->type->ops.elevator_exit_fn(e);
  235         mutex_unlock(&e->sysfs_lock);
  236 
  237         kobject_put(&e->kobj);
  238 }
  239 EXPORT_SYMBOL(elevator_exit);
  240 
  241 static inline void __elv_rqhash_del(struct request *rq)
  242 {
  243         hlist_del_init(&rq->hash);
  244 }
  245 
  246 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
  247 {
  248         if (ELV_ON_HASH(rq))
  249                 __elv_rqhash_del(rq);
  250 }
  251 
  252 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
  253 {
  254         struct elevator_queue *e = q->elevator;
  255 
  256         BUG_ON(ELV_ON_HASH(rq));
  257         hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
  258 }
  259 
  260 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
  261 {
  262         __elv_rqhash_del(rq);
  263         elv_rqhash_add(q, rq);
  264 }
  265 
  266 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
  267 {
  268         struct elevator_queue *e = q->elevator;
  269         struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
  270         struct hlist_node *entry, *next;
  271         struct request *rq;
  272 
  273         hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
  274                 BUG_ON(!ELV_ON_HASH(rq));
  275 
  276                 if (unlikely(!rq_mergeable(rq))) {
  277                         __elv_rqhash_del(rq);
  278                         continue;
  279                 }
  280 
  281                 if (rq_hash_key(rq) == offset)
  282                         return rq;
  283         }
  284 
  285         return NULL;
  286 }
  287 
  288 /*
  289  * RB-tree support functions for insertion/lookup/removal of requests
  290  * in a sorted RB tree.
  291  */
  292 void elv_rb_add(struct rb_root *root, struct request *rq)
  293 {
  294         struct rb_node **p = &root->rb_node;
  295         struct rb_node *parent = NULL;
  296         struct request *__rq;
  297 
  298         while (*p) {
  299                 parent = *p;
  300                 __rq = rb_entry(parent, struct request, rb_node);
  301 
  302                 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
  303                         p = &(*p)->rb_left;
  304                 else
  305                         p = &(*p)->rb_right;
  306         }
  307 
  308         rb_link_node(&rq->rb_node, parent, p);
  309         rb_insert_color(&rq->rb_node, root);
  310 }
  311 EXPORT_SYMBOL(elv_rb_add);
  312 
  313 void elv_rb_del(struct rb_root *root, struct request *rq)
  314 {
  315         BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
  316         rb_erase(&rq->rb_node, root);
  317         RB_CLEAR_NODE(&rq->rb_node);
  318 }
  319 EXPORT_SYMBOL(elv_rb_del);
  320 
  321 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
  322 {
  323         struct rb_node *n = root->rb_node;
  324         struct request *rq;
  325 
  326         while (n) {
  327                 rq = rb_entry(n, struct request, rb_node);
  328 
  329                 if (sector < blk_rq_pos(rq))
  330                         n = n->rb_left;
  331                 else if (sector > blk_rq_pos(rq))
  332                         n = n->rb_right;
  333                 else
  334                         return rq;
  335         }
  336 
  337         return NULL;
  338 }
  339 EXPORT_SYMBOL(elv_rb_find);
  340 
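A minimal sketch (all names hypothetical) of how an io scheduler might keep a sector-sorted tree with these helpers, using elv_rb_find() to locate a front-merge candidate much as the deadline scheduler does:

        struct my_sched_data {
                struct rb_root sort_list;       /* requests keyed by start sector */
        };

        static void my_add_req(struct my_sched_data *d, struct request *rq)
        {
                elv_rb_add(&d->sort_list, rq);  /* keyed on blk_rq_pos(rq) */
        }

        static struct request *my_front_merge(struct my_sched_data *d,
                                              struct bio *bio)
        {
                /* a request starting exactly where the bio ends could
                 * have the bio merged in front of it */
                return elv_rb_find(&d->sort_list,
                                   bio->bi_sector + bio_sectors(bio));
        }
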
  341 /*
  342  * Insert rq into dispatch queue of q.  Queue lock must be held on
  343  * entry.  rq is sorted into the dispatch queue. To be used by
  344  * specific elevators.
  345  */
  346 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
  347 {
  348         sector_t boundary;
  349         struct list_head *entry;
  350         int stop_flags;
  351 
  352         if (q->last_merge == rq)
  353                 q->last_merge = NULL;
  354 
  355         elv_rqhash_del(q, rq);
  356 
  357         q->nr_sorted--;
  358 
  359         boundary = q->end_sector;
  360         stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
  361         list_for_each_prev(entry, &q->queue_head) {
  362                 struct request *pos = list_entry_rq(entry);
  363 
  364                 if ((rq->cmd_flags & REQ_DISCARD) !=
  365                     (pos->cmd_flags & REQ_DISCARD))
  366                         break;
  367                 if (rq_data_dir(rq) != rq_data_dir(pos))
  368                         break;
  369                 if (pos->cmd_flags & stop_flags)
  370                         break;
  371                 if (blk_rq_pos(rq) >= boundary) {
  372                         if (blk_rq_pos(pos) < boundary)
  373                                 continue;
  374                 } else {
  375                         if (blk_rq_pos(pos) >= boundary)
  376                                 break;
  377                 }
  378                 if (blk_rq_pos(rq) >= blk_rq_pos(pos))
  379                         break;
  380         }
  381 
  382         list_add(&rq->queuelist, entry);
  383 }
  384 EXPORT_SYMBOL(elv_dispatch_sort);
  385 
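The boundary test above is the subtle part: requests at or past end_sector sort ahead of requests that have wrapped back below it, preserving one-way head movement. A worked example (sector numbers hypothetical):

        /*
         * end_sector = 1000, dispatch queue front to back: 1200, 1500, 300.
         * Inserting rq at sector 400: scanning from the back, pos = 300 is
         * on the same (wrapped) side of the boundary and 400 >= 300, so
         * the scan stops and rq is added after it: 1200, 1500, 300, 400.
         */
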
  386 /*
  387  * Insert rq into dispatch queue of q.  Queue lock must be held on
  388  * entry.  rq is added to the back of the dispatch queue. To be used by
  389  * specific elevators.
  390  */
  391 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
  392 {
  393         if (q->last_merge == rq)
  394                 q->last_merge = NULL;
  395 
  396         elv_rqhash_del(q, rq);
  397 
  398         q->nr_sorted--;
  399 
  400         q->end_sector = rq_end_sector(rq);
  401         q->boundary_rq = rq;
  402         list_add_tail(&rq->queuelist, &q->queue_head);
  403 }
  404 EXPORT_SYMBOL(elv_dispatch_add_tail);
  405 
  406 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
  407 {
  408         struct elevator_queue *e = q->elevator;
  409         struct request *__rq;
  410         int ret;
  411 
  412         /*
  413          * Levels of merges:
  414          *      nomerges:  No merges at all attempted
  415          *      noxmerges: Only simple one-hit cache try
  416          *      merges:    All merge tries attempted
  417          */
  418         if (blk_queue_nomerges(q))
  419                 return ELEVATOR_NO_MERGE;
  420 
  421         /*
  422          * First try one-hit cache.
  423          */
  424         if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
  425                 ret = blk_try_merge(q->last_merge, bio);
  426                 if (ret != ELEVATOR_NO_MERGE) {
  427                         *req = q->last_merge;
  428                         return ret;
  429                 }
  430         }
  431 
  432         if (blk_queue_noxmerges(q))
  433                 return ELEVATOR_NO_MERGE;
  434 
  435         /*
  436          * See if our hash lookup can find a potential backmerge.
  437          */
  438         __rq = elv_rqhash_find(q, bio->bi_sector);
  439         if (__rq && elv_rq_merge_ok(__rq, bio)) {
  440                 *req = __rq;
  441                 return ELEVATOR_BACK_MERGE;
  442         }
  443 
  444         if (e->type->ops.elevator_merge_fn)
  445                 return e->type->ops.elevator_merge_fn(q, req, bio);
  446 
  447         return ELEVATOR_NO_MERGE;
  448 }
  449 
  450 /*
  451  * Attempt to do an insertion back merge. Only check for the case where
  452  * we can append 'rq' to an existing request, so we can throw 'rq' away
  453  * afterwards.
  454  *
  455  * Returns true if we merged, false otherwise
  456  */
  457 static bool elv_attempt_insert_merge(struct request_queue *q,
  458                                      struct request *rq)
  459 {
  460         struct request *__rq;
  461         bool ret;
  462 
  463         if (blk_queue_nomerges(q))
  464                 return false;
  465 
  466         /*
  467          * First try one-hit cache.
  468          */
  469         if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
  470                 return true;
  471 
  472         if (blk_queue_noxmerges(q))
  473                 return false;
  474 
  475         ret = false;
  476         /*
  477          * See if our hash lookup can find a potential backmerge.
  478          */
  479         while (1) {
  480                 __rq = elv_rqhash_find(q, blk_rq_pos(rq));
  481                 if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
  482                         break;
  483 
  484                 /* The merged request could be merged with others, try again */
  485                 ret = true;
  486                 rq = __rq;
  487         }
  488 
  489         return ret;
  490 }
  491 
  492 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
  493 {
  494         struct elevator_queue *e = q->elevator;
  495 
  496         if (e->type->ops.elevator_merged_fn)
  497                 e->type->ops.elevator_merged_fn(q, rq, type);
  498 
  499         if (type == ELEVATOR_BACK_MERGE)
  500                 elv_rqhash_reposition(q, rq);
  501 
  502         q->last_merge = rq;
  503 }
  504 
  505 void elv_merge_requests(struct request_queue *q, struct request *rq,
  506                              struct request *next)
  507 {
  508         struct elevator_queue *e = q->elevator;
  509         const int next_sorted = next->cmd_flags & REQ_SORTED;
  510 
  511         if (next_sorted && e->type->ops.elevator_merge_req_fn)
  512                 e->type->ops.elevator_merge_req_fn(q, rq, next);
  513 
  514         elv_rqhash_reposition(q, rq);
  515 
  516         if (next_sorted) {
  517                 elv_rqhash_del(q, next);
  518                 q->nr_sorted--;
  519         }
  520 
  521         q->last_merge = rq;
  522 }
  523 
  524 void elv_bio_merged(struct request_queue *q, struct request *rq,
  525                         struct bio *bio)
  526 {
  527         struct elevator_queue *e = q->elevator;
  528 
  529         if (e->type->ops.elevator_bio_merged_fn)
  530                 e->type->ops.elevator_bio_merged_fn(q, rq, bio);
  531 }
  532 
  533 void elv_requeue_request(struct request_queue *q, struct request *rq)
  534 {
  535         /*
  536          * It already went through dequeue; we need to decrement the
  537          * in_flight count again.
  538          */
  539         if (blk_account_rq(rq)) {
  540                 q->in_flight[rq_is_sync(rq)]--;
  541                 if (rq->cmd_flags & REQ_SORTED)
  542                         elv_deactivate_rq(q, rq);
  543         }
  544 
  545         rq->cmd_flags &= ~REQ_STARTED;
  546 
  547         __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
  548 }
  549 
  550 void elv_drain_elevator(struct request_queue *q)
  551 {
  552         static int printed;
  553 
  554         lockdep_assert_held(q->queue_lock);
  555 
  556         while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
  557                 ;
  558         if (q->nr_sorted && printed++ < 10) {
  559                 printk(KERN_ERR "%s: forced dispatching is broken "
  560                        "(nr_sorted=%u), please report this\n",
  561                        q->elevator->type->elevator_name, q->nr_sorted);
  562         }
  563 }
  564 
  565 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
  566 {
  567         trace_block_rq_insert(q, rq);
  568 
  569         rq->q = q;
  570 
  571         if (rq->cmd_flags & REQ_SOFTBARRIER) {
  572                 /* barriers are scheduling boundary, update end_sector */
  573                 if (rq->cmd_type == REQ_TYPE_FS) {
  574                         q->end_sector = rq_end_sector(rq);
  575                         q->boundary_rq = rq;
  576                 }
  577         } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
  578                     (where == ELEVATOR_INSERT_SORT ||
  579                      where == ELEVATOR_INSERT_SORT_MERGE))
  580                 where = ELEVATOR_INSERT_BACK;
  581 
  582         switch (where) {
  583         case ELEVATOR_INSERT_REQUEUE:
  584         case ELEVATOR_INSERT_FRONT:
  585                 rq->cmd_flags |= REQ_SOFTBARRIER;
  586                 list_add(&rq->queuelist, &q->queue_head);
  587                 break;
  588 
  589         case ELEVATOR_INSERT_BACK:
  590                 rq->cmd_flags |= REQ_SOFTBARRIER;
  591                 elv_drain_elevator(q);
  592                 list_add_tail(&rq->queuelist, &q->queue_head);
  593                 /*
  594                  * We kick the queue here for the following reasons.
  595                  * - The elevator might have held requests back by
  596                  *   returning NULL and be releasing them now.  As the
  597                  *   queue wasn't empty before this request, ll_rw_blk
  598                  *   won't run the queue on return, resulting in a hang.
  599                  * - Usually, back inserted requests won't be merged
  600                  *   with anything.  There's no point in delaying queue
  601                  *   processing.
  602                  */
  603                 __blk_run_queue(q);
  604                 break;
  605 
  606         case ELEVATOR_INSERT_SORT_MERGE:
  607                 /*
  608                  * If we succeed in merging this request with one in the
  609                  * queue already, we are done - rq has now been freed,
  610                  * so no need to do anything further.
  611                  */
  612                 if (elv_attempt_insert_merge(q, rq))
  613                         break;
  614         case ELEVATOR_INSERT_SORT:
  615                 BUG_ON(rq->cmd_type != REQ_TYPE_FS);
  616                 rq->cmd_flags |= REQ_SORTED;
  617                 q->nr_sorted++;
  618                 if (rq_mergeable(rq)) {
  619                         elv_rqhash_add(q, rq);
  620                         if (!q->last_merge)
  621                                 q->last_merge = rq;
  622                 }
  623 
  624                 /*
  625                  * Some ioscheds (cfq) run q->request_fn directly, so
  626                  * rq cannot be accessed after calling
  627                  * elevator_add_req_fn.
  628                  */
  629                 q->elevator->type->ops.elevator_add_req_fn(q, rq);
  630                 break;
  631 
  632         case ELEVATOR_INSERT_FLUSH:
  633                 rq->cmd_flags |= REQ_SOFTBARRIER;
  634                 blk_insert_flush(rq);
  635                 break;
  636         default:
  637                 printk(KERN_ERR "%s: bad insertion point %d\n",
  638                        __func__, where);
  639                 BUG();
  640         }
  641 }
  642 EXPORT_SYMBOL(__elv_add_request);
  643 
  644 void elv_add_request(struct request_queue *q, struct request *rq, int where)
  645 {
  646         unsigned long flags;
  647 
  648         spin_lock_irqsave(q->queue_lock, flags);
  649         __elv_add_request(q, rq, where);
  650         spin_unlock_irqrestore(q->queue_lock, flags);
  651 }
  652 EXPORT_SYMBOL(elv_add_request);
  653 
  654 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
  655 {
  656         struct elevator_queue *e = q->elevator;
  657 
  658         if (e->type->ops.elevator_latter_req_fn)
  659                 return e->type->ops.elevator_latter_req_fn(q, rq);
  660         return NULL;
  661 }
  662 
  663 struct request *elv_former_request(struct request_queue *q, struct request *rq)
  664 {
  665         struct elevator_queue *e = q->elevator;
  666 
  667         if (e->type->ops.elevator_former_req_fn)
  668                 return e->type->ops.elevator_former_req_fn(q, rq);
  669         return NULL;
  670 }
  671 
  672 int elv_set_request(struct request_queue *q, struct request *rq,
  673                     struct bio *bio, gfp_t gfp_mask)
  674 {
  675         struct elevator_queue *e = q->elevator;
  676 
  677         if (e->type->ops.elevator_set_req_fn)
  678                 return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
  679         return 0;
  680 }
  681 
  682 void elv_put_request(struct request_queue *q, struct request *rq)
  683 {
  684         struct elevator_queue *e = q->elevator;
  685 
  686         if (e->type->ops.elevator_put_req_fn)
  687                 e->type->ops.elevator_put_req_fn(rq);
  688 }
  689 
  690 int elv_may_queue(struct request_queue *q, int rw)
  691 {
  692         struct elevator_queue *e = q->elevator;
  693 
  694         if (e->type->ops.elevator_may_queue_fn)
  695                 return e->type->ops.elevator_may_queue_fn(q, rw);
  696 
  697         return ELV_MQUEUE_MAY;
  698 }
  699 
  700 void elv_abort_queue(struct request_queue *q)
  701 {
  702         struct request *rq;
  703 
  704         blk_abort_flushes(q);
  705 
  706         while (!list_empty(&q->queue_head)) {
  707                 rq = list_entry_rq(q->queue_head.next);
  708                 rq->cmd_flags |= REQ_QUIET;
  709                 trace_block_rq_abort(q, rq);
  710                 /*
  711                  * Mark this request as started so we don't trigger
  712                  * any debug logic in the end I/O path.
  713                  */
  714                 blk_start_request(rq);
  715                 __blk_end_request_all(rq, -EIO);
  716         }
  717 }
  718 EXPORT_SYMBOL(elv_abort_queue);
  719 
  720 void elv_completed_request(struct request_queue *q, struct request *rq)
  721 {
  722         struct elevator_queue *e = q->elevator;
  723 
  724         /*
  725          * The request was released by the driver, so its io must be done.
  726          */
  727         if (blk_account_rq(rq)) {
  728                 q->in_flight[rq_is_sync(rq)]--;
  729                 if ((rq->cmd_flags & REQ_SORTED) &&
  730                     e->type->ops.elevator_completed_req_fn)
  731                         e->type->ops.elevator_completed_req_fn(q, rq);
  732         }
  733 }
  734 
  735 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
  736 
  737 static ssize_t
  738 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  739 {
  740         struct elv_fs_entry *entry = to_elv(attr);
  741         struct elevator_queue *e;
  742         ssize_t error;
  743 
  744         if (!entry->show)
  745                 return -EIO;
  746 
  747         e = container_of(kobj, struct elevator_queue, kobj);
  748         mutex_lock(&e->sysfs_lock);
  749         error = e->type ? entry->show(e, page) : -ENOENT;
  750         mutex_unlock(&e->sysfs_lock);
  751         return error;
  752 }
  753 
  754 static ssize_t
  755 elv_attr_store(struct kobject *kobj, struct attribute *attr,
  756                const char *page, size_t length)
  757 {
  758         struct elv_fs_entry *entry = to_elv(attr);
  759         struct elevator_queue *e;
  760         ssize_t error;
  761 
  762         if (!entry->store)
  763                 return -EIO;
  764 
  765         e = container_of(kobj, struct elevator_queue, kobj);
  766         mutex_lock(&e->sysfs_lock);
  767         error = e->type ? entry->store(e, page, length) : -ENOENT;
  768         mutex_unlock(&e->sysfs_lock);
  769         return error;
  770 }
  771 
  772 static const struct sysfs_ops elv_sysfs_ops = {
  773         .show   = elv_attr_show,
  774         .store  = elv_attr_store,
  775 };
  776 
  777 static struct kobj_type elv_ktype = {
  778         .sysfs_ops      = &elv_sysfs_ops,
  779         .release        = elevator_release,
  780 };
  781 
  782 int elv_register_queue(struct request_queue *q)
  783 {
  784         struct elevator_queue *e = q->elevator;
  785         int error;
  786 
  787         error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
  788         if (!error) {
  789                 struct elv_fs_entry *attr = e->type->elevator_attrs;
  790                 if (attr) {
  791                         while (attr->attr.name) {
  792                                 if (sysfs_create_file(&e->kobj, &attr->attr))
  793                                         break;
  794                                 attr++;
  795                         }
  796                 }
  797                 kobject_uevent(&e->kobj, KOBJ_ADD);
  798                 e->registered = 1;
  799         }
  800         return error;
  801 }
  802 EXPORT_SYMBOL(elv_register_queue);
  803 
  804 void elv_unregister_queue(struct request_queue *q)
  805 {
  806         if (q) {
  807                 struct elevator_queue *e = q->elevator;
  808 
  809                 kobject_uevent(&e->kobj, KOBJ_REMOVE);
  810                 kobject_del(&e->kobj);
  811                 e->registered = 0;
  812         }
  813 }
  814 EXPORT_SYMBOL(elv_unregister_queue);
  815 
  816 int elv_register(struct elevator_type *e)
  817 {
  818         char *def = "";
  819 
  820         /* create icq_cache if requested */
  821         if (e->icq_size) {
  822                 if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
  823                     WARN_ON(e->icq_align < __alignof__(struct io_cq)))
  824                         return -EINVAL;
  825 
  826                 snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
  827                          "%s_io_cq", e->elevator_name);
  828                 e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
  829                                                  e->icq_align, 0, NULL);
  830                 if (!e->icq_cache)
  831                         return -ENOMEM;
  832         }
  833 
  834         /* register, don't allow duplicate names */
  835         spin_lock(&elv_list_lock);
  836         if (elevator_find(e->elevator_name)) {
  837                 spin_unlock(&elv_list_lock);
  838                 if (e->icq_cache)
  839                         kmem_cache_destroy(e->icq_cache);
  840                 return -EBUSY;
  841         }
  842         list_add_tail(&e->list, &elv_list);
  843         spin_unlock(&elv_list_lock);
  844 
  845         /* print pretty message */
  846         if (!strcmp(e->elevator_name, chosen_elevator) ||
  847                         (!*chosen_elevator &&
  848                          !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
  849                                 def = " (default)";
  850 
  851         printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
  852                                                                 def);
  853         return 0;
  854 }
  855 EXPORT_SYMBOL_GPL(elv_register);
  856 
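For context, a hedged sketch (all names hypothetical) of the module side of registration; the ops fields shown are the ones this file invokes. Because elevator_get() loads missing schedulers with request_module("%s-iosched", name), a loadable scheduler would normally name its module to match:

        static struct elevator_type my_iosched = {
                .ops = {
                        .elevator_merge_req_fn  = my_merged_requests,
                        .elevator_dispatch_fn   = my_dispatch,
                        .elevator_add_req_fn    = my_add_request,
                        .elevator_init_fn       = my_init_queue,
                        .elevator_exit_fn       = my_exit_queue,
                },
                .elevator_name  = "mysched",
                .elevator_owner = THIS_MODULE,
        };

        static int __init my_iosched_init(void)
        {
                return elv_register(&my_iosched);
        }

        static void __exit my_iosched_exit(void)
        {
                elv_unregister(&my_iosched);
        }

        module_init(my_iosched_init);
        module_exit(my_iosched_exit);
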
  857 void elv_unregister(struct elevator_type *e)
  858 {
  859         /* unregister */
  860         spin_lock(&elv_list_lock);
  861         list_del_init(&e->list);
  862         spin_unlock(&elv_list_lock);
  863 
  864         /*
  865          * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
  866          * sure all RCU operations are complete before proceeding.
  867          */
  868         if (e->icq_cache) {
  869                 rcu_barrier();
  870                 kmem_cache_destroy(e->icq_cache);
  871                 e->icq_cache = NULL;
  872         }
  873 }
  874 EXPORT_SYMBOL_GPL(elv_unregister);
  875 
  876 /*
  877  * Switch to new_e io scheduler.  Be careful not to introduce deadlocks:
  878  * we don't free the old io scheduler before we have allocated what we
  879  * need for the new one.  This way we have a chance of going back to the
  880  * old one if the new one fails init for some reason.
  881  */
  882 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
  883 {
  884         struct elevator_queue *old = q->elevator;
  885         bool registered = old->registered;
  886         int err;
  887 
  888         /*
  889          * Turn on BYPASS and drain all requests w/ elevator private data.
  890          * Block layer doesn't call into a quiesced elevator - all requests
  891          * are directly put on the dispatch list without elevator data
  892          * using INSERT_BACK.  All requests have SOFTBARRIER set and no
  893          * merge happens either.
  894          */
  895         blk_queue_bypass_start(q);
  896 
  897         /* unregister and clear all auxiliary data of the old elevator */
  898         if (registered)
  899                 elv_unregister_queue(q);
  900 
  901         spin_lock_irq(q->queue_lock);
  902         ioc_clear_queue(q);
  903         spin_unlock_irq(q->queue_lock);
  904 
  905         /* allocate, init and register new elevator */
  906         err = -ENOMEM;
  907         q->elevator = elevator_alloc(q, new_e);
  908         if (!q->elevator)
  909                 goto fail_init;
  910 
  911         err = new_e->ops.elevator_init_fn(q);
  912         if (err) {
  913                 kobject_put(&q->elevator->kobj);
  914                 goto fail_init;
  915         }
  916 
  917         if (registered) {
  918                 err = elv_register_queue(q);
  919                 if (err)
  920                         goto fail_register;
  921         }
  922 
  923         /* done, kill the old one and finish */
  924         elevator_exit(old);
  925         blk_queue_bypass_end(q);
  926 
  927         blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
  928 
  929         return 0;
  930 
  931 fail_register:
  932         elevator_exit(q->elevator);
  933 fail_init:
  934         /* switch failed, restore and re-register old elevator */
  935         q->elevator = old;
  936         elv_register_queue(q);
  937         blk_queue_bypass_end(q);
  938 
  939         return err;
  940 }
  941 
  942 /*
  943  * Switch this queue to the given IO scheduler.
  944  */
  945 int elevator_change(struct request_queue *q, const char *name)
  946 {
  947         char elevator_name[ELV_NAME_MAX];
  948         struct elevator_type *e;
  949 
  950         if (!q->elevator)
  951                 return -ENXIO;
  952 
  953         strlcpy(elevator_name, name, sizeof(elevator_name));
  954         e = elevator_get(strstrip(elevator_name));
  955         if (!e) {
  956                 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
  957                 return -EINVAL;
  958         }
  959 
  960         if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
  961                 elevator_put(e);
  962                 return 0;
  963         }
  964 
  965         return elevator_switch(q, e);
  966 }
  967 EXPORT_SYMBOL(elevator_change);
  968 
  969 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
  970                           size_t count)
  971 {
  972         int ret;
  973 
  974         if (!q->elevator)
  975                 return count;
  976 
  977         ret = elevator_change(q, name);
  978         if (!ret)
  979                 return count;
  980 
  981         printk(KERN_ERR "elevator: switch to %s failed\n", name);
  982         return ret;
  983 }
  984 
  985 ssize_t elv_iosched_show(struct request_queue *q, char *name)
  986 {
  987         struct elevator_queue *e = q->elevator;
  988         struct elevator_type *elv;
  989         struct elevator_type *__e;
  990         int len = 0;
  991 
  992         if (!q->elevator || !blk_queue_stackable(q))
  993                 return sprintf(name, "none\n");
  994 
  995         elv = e->type;
  996 
  997         spin_lock(&elv_list_lock);
  998         list_for_each_entry(__e, &elv_list, list) {
  999                 if (!strcmp(elv->elevator_name, __e->elevator_name))
 1000                         len += sprintf(name+len, "[%s] ", elv->elevator_name);
 1001                 else
 1002                         len += sprintf(name+len, "%s ", __e->elevator_name);
 1003         }
 1004         spin_unlock(&elv_list_lock);
 1005 
 1006         len += sprintf(name+len, "\n");
 1007         return len;
 1008 }
 1009 
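These two functions back /sys/block/<dev>/queue/scheduler: the show side brackets the active scheduler, the store side feeds elevator_change(). A typical interaction (device name hypothetical):

        # cat /sys/block/sda/queue/scheduler
        noop deadline [cfq]
        # echo deadline > /sys/block/sda/queue/scheduler
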
 1010 struct request *elv_rb_former_request(struct request_queue *q,
 1011                                       struct request *rq)
 1012 {
 1013         struct rb_node *rbprev = rb_prev(&rq->rb_node);
 1014 
 1015         if (rbprev)
 1016                 return rb_entry_rq(rbprev);
 1017 
 1018         return NULL;
 1019 }
 1020 EXPORT_SYMBOL(elv_rb_former_request);
 1021 
 1022 struct request *elv_rb_latter_request(struct request_queue *q,
 1023                                       struct request *rq)
 1024 {
 1025         struct rb_node *rbnext = rb_next(&rq->rb_node);
 1026 
 1027         if (rbnext)
 1028                 return rb_entry_rq(rbnext);
 1029 
 1030         return NULL;
 1031 }
 1032 EXPORT_SYMBOL(elv_rb_latter_request);



This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.