FreeBSD/Linux Kernel Cross Reference
sys/block/blk-flush.c


    1 /*
    2  * Functions to sequence FLUSH and FUA writes.
    3  *
    4  * Copyright (C) 2011           Max Planck Institute for Gravitational Physics
    5  * Copyright (C) 2011           Tejun Heo <tj@kernel.org>
    6  *
    7  * This file is released under the GPLv2.
    8  *
    9  * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
   10  * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
   11  * properties and hardware capability.
   12  *
   13  * If a request doesn't have data, only REQ_FLUSH makes sense, which
   14  * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
   15  * that the device cache should be flushed before the data is written, and
   16  * REQ_FUA means that the data must be on non-volatile media on request
   17  * completion.
   18  *
   19  * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
   20  * difference.  The requests are either completed immediately if there's no
   21  * data or executed as normal requests otherwise.
   22  *
   23  * If the device has a writeback cache and supports FUA, REQ_FLUSH is
   24  * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
   25  *
   26  * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
   27  * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
   28  *
   29  * The actual execution of flush is double buffered.  Whenever a request
   30  * needs to execute PRE or POSTFLUSH, it queues at
   31  * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
   32  * flush is issued and the pending_idx is toggled.  When the flush
   33  * completes, all the requests which were pending proceed to the next
   34  * step.  This allows arbitrary merging of different types of FLUSH/FUA
   35  * requests.
   36  *
   37  * Currently, the following conditions are used to determine when to issue
   38  * a flush.
   39  *
   40  * C1. At any given time, only one flush shall be in progress.  This makes
   41  *     double buffering sufficient.
   42  *
   43  * C2. A flush is deferred if any request is executing DATA of its sequence.
   44  *     This avoids issuing separate POSTFLUSHes for requests which shared a
   45  *     PREFLUSH.
   46  *
   47  * C3. The second condition is ignored if there is a request which has
   48  *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
   49  *     starvation in the unlikely case where there is a continuous stream of
   50  *     FUA (without FLUSH) requests.
   51  *
   52  * For devices which support FUA, it isn't clear whether C2 (and thus C3)
   53  * is beneficial.
   54  *
   55  * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
   56  * Once while executing DATA and again after the whole sequence is
   57  * complete.  The first completion updates the contained bio but doesn't
   58  * finish it so that the bio submitter is notified only after the whole
   59  * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
   60  * req_bio_endio().
   61  *
   62  * The above peculiarity requires that each FLUSH/FUA request has only one
   63  * bio attached to it, which is guaranteed as they aren't allowed to be
   64  * merged in the usual way.
   65  */
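
To make the decomposition above concrete: a write that carries data and asks for both FLUSH and FUA becomes DATA only on a device with no writeback cache, PREFLUSH+DATA+POSTFLUSH on a device with a cache but no FUA, and PREFLUSH+DATA on a device with both. The userspace sketch below mirrors that policy choice; the X_* flags, SEQ_* bits and the policy() helper are illustrative stand-ins, not the kernel's definitions.

/* Illustrative stand-in for the decomposition described above. */
#include <stdio.h>

#define X_FLUSH (1 << 0)        /* stand-in for REQ_FLUSH */
#define X_FUA   (1 << 1)        /* stand-in for REQ_FUA   */

#define SEQ_PREFLUSH  (1 << 0)
#define SEQ_DATA      (1 << 1)
#define SEQ_POSTFLUSH (1 << 2)

/* queue_flags: what the device advertises; req_flags: what the request asks for */
static unsigned int policy(unsigned int queue_flags, unsigned int req_flags,
                           int has_data)
{
        unsigned int p = 0;

        if (has_data)
                p |= SEQ_DATA;
        if (queue_flags & X_FLUSH) {                    /* writeback cache present */
                if (req_flags & X_FLUSH)
                        p |= SEQ_PREFLUSH;
                if (!(queue_flags & X_FUA) && (req_flags & X_FUA))
                        p |= SEQ_POSTFLUSH;             /* emulate FUA with a flush */
        }
        return p;
}

int main(void)
{
        unsigned int w = X_FLUSH | X_FUA;       /* FLUSH+FUA write with data */

        printf("no writeback cache:  0x%x\n", policy(0, w, 1));                /* DATA          */
        printf("cache, no FUA:       0x%x\n", policy(X_FLUSH, w, 1));          /* PRE|DATA|POST */
        printf("cache and FUA:       0x%x\n", policy(X_FLUSH | X_FUA, w, 1));  /* PRE|DATA      */
        return 0;
}
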
   66 
   67 #include <linux/kernel.h>
   68 #include <linux/module.h>
   69 #include <linux/bio.h>
   70 #include <linux/blkdev.h>
   71 #include <linux/gfp.h>
   72 
   73 #include "blk.h"
   74 
   75 /* FLUSH/FUA sequences */
   76 enum {
   77         REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
   78         REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
   79         REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
   80         REQ_FSEQ_DONE           = (1 << 3),
   81 
   82         REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
   83                                   REQ_FSEQ_POSTFLUSH,
   84 
   85         /*
    86          * If a flush has been pending longer than the following timeout,
   87          * it's issued even if flush_data requests are still in flight.
   88          */
   89         FLUSH_PENDING_TIMEOUT   = 5 * HZ,
   90 };
   91 
   92 static bool blk_kick_flush(struct request_queue *q);
   93 
   94 static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
   95 {
   96         unsigned int policy = 0;
   97 
   98         if (blk_rq_sectors(rq))
   99                 policy |= REQ_FSEQ_DATA;
  100 
  101         if (fflags & REQ_FLUSH) {
  102                 if (rq->cmd_flags & REQ_FLUSH)
  103                         policy |= REQ_FSEQ_PREFLUSH;
  104                 if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
  105                         policy |= REQ_FSEQ_POSTFLUSH;
  106         }
  107         return policy;
  108 }
  109 
  110 static unsigned int blk_flush_cur_seq(struct request *rq)
  111 {
  112         return 1 << ffz(rq->flush.seq);
  113 }
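
ffz() returns the index of the first zero bit, so the expression above picks the lowest REQ_FSEQ_* step that has not yet been recorded in @rq->flush.seq. A userspace stand-in (using a GCC/Clang builtin in place of the kernel's ffz()) walks the same progression:

/* Userspace stand-in for 1 << ffz(seq); assumes seq is never all-ones. */
#include <assert.h>

static unsigned int next_step(unsigned int seq)
{
        return 1u << __builtin_ctz(~seq);
}

int main(void)
{
        assert(next_step(0x0) == 0x1);  /* nothing done yet  -> PREFLUSH  */
        assert(next_step(0x1) == 0x2);  /* PREFLUSH recorded -> DATA      */
        assert(next_step(0x3) == 0x4);  /* DATA recorded     -> POSTFLUSH */
        assert(next_step(0x7) == 0x8);  /* all actions done  -> DONE      */
        return 0;
}
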
  114 
  115 static void blk_flush_restore_request(struct request *rq)
  116 {
  117         /*
  118          * After flush data completion, @rq->bio is %NULL but we need to
  119          * complete the bio again.  @rq->biotail is guaranteed to equal the
  120          * original @rq->bio.  Restore it.
  121          */
  122         rq->bio = rq->biotail;
  123 
  124         /* make @rq a normal request */
  125         rq->cmd_flags &= ~REQ_FLUSH_SEQ;
  126         rq->end_io = rq->flush.saved_end_io;
  127 }
  128 
  129 /**
  130  * blk_flush_complete_seq - complete flush sequence
  131  * @rq: FLUSH/FUA request being sequenced
  132  * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
  133  * @error: %0 if the just-completed step succeeded, negative errno otherwise
  134  *
  135  * @rq just completed the @seq part of its flush sequence; record the
  136  * completion and trigger the next step.
  137  *
  138  * CONTEXT:
  139  * spin_lock_irq(q->queue_lock)
  140  *
  141  * RETURNS:
  142  * %true if requests were added to the dispatch queue, %false otherwise.
  143  */
  144 static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
  145                                    int error)
  146 {
  147         struct request_queue *q = rq->q;
  148         struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
  149         bool queued = false;
  150 
  151         BUG_ON(rq->flush.seq & seq);
  152         rq->flush.seq |= seq;
  153 
  154         if (likely(!error))
  155                 seq = blk_flush_cur_seq(rq);
  156         else
  157                 seq = REQ_FSEQ_DONE;
  158 
  159         switch (seq) {
  160         case REQ_FSEQ_PREFLUSH:
  161         case REQ_FSEQ_POSTFLUSH:
  162                 /* queue for flush */
  163                 if (list_empty(pending))
  164                         q->flush_pending_since = jiffies;
  165                 list_move_tail(&rq->flush.list, pending);
  166                 break;
  167 
  168         case REQ_FSEQ_DATA:
  169                 list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
  170                 list_add(&rq->queuelist, &q->queue_head);
  171                 queued = true;
  172                 break;
  173 
  174         case REQ_FSEQ_DONE:
  175                 /*
  176                  * @rq was previously adjusted by blk_insert_flush() for
  177                  * flush sequencing and may already have gone through the
  178                  * flush data request completion path.  Restore @rq for
  179                  * normal completion and end it.
  180                  */
  181                 BUG_ON(!list_empty(&rq->queuelist));
  182                 list_del_init(&rq->flush.list);
  183                 blk_flush_restore_request(rq);
  184                 __blk_end_request_all(rq, error);
  185                 break;
  186 
  187         default:
  188                 BUG();
  189         }
  190 
  191         return blk_kick_flush(q) | queued;
  192 }
  193 
  194 static void flush_end_io(struct request *flush_rq, int error)
  195 {
  196         struct request_queue *q = flush_rq->q;
  197         struct list_head *running = &q->flush_queue[q->flush_running_idx];
  198         bool queued = false;
  199         struct request *rq, *n;
  200 
  201         BUG_ON(q->flush_pending_idx == q->flush_running_idx);
  202 
  203         /* account completion of the flush request */
  204         q->flush_running_idx ^= 1;
  205         elv_completed_request(q, flush_rq);
  206 
  207         /* and push the waiting requests to the next stage */
  208         list_for_each_entry_safe(rq, n, running, flush.list) {
  209                 unsigned int seq = blk_flush_cur_seq(rq);
  210 
  211                 BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
  212                 queued |= blk_flush_complete_seq(rq, seq, error);
  213         }
  214 
  215         /*
  216          * Kick the queue to avoid stall for two cases:
  217          * 1. Moving a request silently to an empty queue_head may stall
  218          *    the queue.
  219          * 2. When a flush request is running in a non-queueable queue, the
  220          *    queue is held.  Restart the queue after the flush request is
  221          *    finished to avoid a stall.
  222          * This function is called from the request completion path and
  223          * calling directly into request_fn may confuse the driver.  Always
  224          * use kblockd.
  225          */
  226         if (queued || q->flush_queue_delayed)
  227                 blk_run_queue_async(q);
  228         q->flush_queue_delayed = 0;
  229 }
  230 
  231 /**
  232  * blk_kick_flush - consider issuing flush request
  233  * @q: request_queue being kicked
  234  *
  235  * Flush-related state of @q has changed; consider issuing a flush request.
  236  * Please read the comment at the top of this file for more info.
  237  *
  238  * CONTEXT:
  239  * spin_lock_irq(q->queue_lock)
  240  *
  241  * RETURNS:
  242  * %true if flush was issued, %false otherwise.
  243  */
  244 static bool blk_kick_flush(struct request_queue *q)
  245 {
  246         struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
  247         struct request *first_rq =
  248                 list_first_entry(pending, struct request, flush.list);
  249 
  250         /* C1 described at the top of this file */
  251         if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
  252                 return false;
  253 
  254         /* C2 and C3 */
  255         if (!list_empty(&q->flush_data_in_flight) &&
  256             time_before(jiffies,
  257                         q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
  258                 return false;
  259 
  260         /*
  261          * Issue flush and toggle pending_idx.  This makes pending_idx
  262          * different from running_idx, which means flush is in flight.
  263          */
  264         blk_rq_init(q, &q->flush_rq);
  265         q->flush_rq.cmd_type = REQ_TYPE_FS;
  266         q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
  267         q->flush_rq.rq_disk = first_rq->rq_disk;
  268         q->flush_rq.end_io = flush_end_io;
  269 
  270         q->flush_pending_idx ^= 1;
  271         list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
  272         return true;
  273 }
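
The double buffering works through two one-bit indices: blk_kick_flush() toggles flush_pending_idx when it issues a flush, flush_end_io() toggles flush_running_idx when that flush completes, and the indices being unequal is exactly the "flush in flight" state that condition C1 checks. A stand-alone illustration of the toggle, using plain local variables rather than the request_queue fields:

/* Stand-alone illustration of the pending/running index handshake. */
#include <stdio.h>

int main(void)
{
        unsigned int pending_idx = 0, running_idx = 0;

        /* idle: indices equal, no flush in flight, C1 permits issuing */
        printf("in flight? %d\n", pending_idx != running_idx);         /* 0 */

        pending_idx ^= 1;       /* blk_kick_flush(): flush issued    */
        printf("in flight? %d\n", pending_idx != running_idx);         /* 1 */

        running_idx ^= 1;       /* flush_end_io(): flush completed   */
        printf("in flight? %d\n", pending_idx != running_idx);         /* 0 */
        return 0;
}
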
  274 
  275 static void flush_data_end_io(struct request *rq, int error)
  276 {
  277         struct request_queue *q = rq->q;
  278 
  279         /*
  280          * After populating an empty queue, kick it to avoid a stall.  Read
  281          * the comment in flush_end_io().
  282          */
  283         if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
  284                 blk_run_queue_async(q);
  285 }
  286 
  287 /**
  288  * blk_insert_flush - insert a new FLUSH/FUA request
  289  * @rq: request to insert
  290  *
  291  * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
  292  * @rq is being submitted.  Analyze what needs to be done and put it on the
  293  * right queue.
  294  *
  295  * CONTEXT:
  296  * spin_lock_irq(q->queue_lock)
  297  */
  298 void blk_insert_flush(struct request *rq)
  299 {
  300         struct request_queue *q = rq->q;
  301         unsigned int fflags = q->flush_flags;   /* may change, cache */
  302         unsigned int policy = blk_flush_policy(fflags, rq);
  303 
  304         /*
  305          * @policy now records what operations need to be done.  Adjust
  306          * REQ_FLUSH and FUA for the driver.
  307          */
  308         rq->cmd_flags &= ~REQ_FLUSH;
  309         if (!(fflags & REQ_FUA))
  310                 rq->cmd_flags &= ~REQ_FUA;
  311 
  312         /*
  313          * An empty flush handed down from a stacking driver may
  314          * translate into nothing if the underlying device does not
  315          * advertise a write-back cache.  In this case, simply
  316          * complete the request.
  317          */
  318         if (!policy) {
  319                 __blk_end_bidi_request(rq, 0, 0, 0);
  320                 return;
  321         }
  322 
  323         BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
  324 
  325         /*
  326          * If there's data but flush is not necessary, the request can be
  327          * processed directly without going through flush machinery.  Queue
  328          * for normal execution.
  329          */
  330         if ((policy & REQ_FSEQ_DATA) &&
  331             !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
  332                 list_add_tail(&rq->queuelist, &q->queue_head);
  333                 return;
  334         }
  335 
  336         /*
  337          * @rq should go through flush machinery.  Mark it part of flush
  338          * sequence and submit for further processing.
  339          */
  340         memset(&rq->flush, 0, sizeof(rq->flush));
  341         INIT_LIST_HEAD(&rq->flush.list);
  342         rq->cmd_flags |= REQ_FLUSH_SEQ;
  343         rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
  344         rq->end_io = flush_data_end_io;
  345 
  346         blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
  347 }
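
The final call above seeds @rq->flush.seq with REQ_FSEQ_ACTIONS & ~policy: every step this request does not need is pre-marked as already done, so blk_flush_cur_seq() lands directly on the first step that is actually required. A stand-alone sketch of that arithmetic, with plain constants standing in for the kernel's flags:

/* Stand-in constants; mirrors the REQ_FSEQ_* pre-marking, not kernel code. */
#include <stdio.h>

enum { PRE = 1 << 0, DATA = 1 << 1, POST = 1 << 2,
       ACTIONS = PRE | DATA | POST };

static unsigned int next_step(unsigned int seq)
{
        return 1u << __builtin_ctz(~seq);       /* stand-in for 1 << ffz(seq) */
}

int main(void)
{
        /* e.g. a FUA data write on a device without FUA: needs DATA, POSTFLUSH */
        unsigned int policy = DATA | POST;
        unsigned int seq = ACTIONS & ~policy;   /* PREFLUSH pre-marked as done */

        printf("first step:  0x%x (DATA)\n", next_step(seq));
        seq |= DATA;                            /* the data write completed */
        printf("second step: 0x%x (POSTFLUSH)\n", next_step(seq));
        return 0;
}
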
  348 
  349 /**
  350  * blk_abort_flushes - @q is being aborted, abort flush requests
  351  * @q: request_queue being aborted
  352  *
  353  * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
  354  * FLUSH/FUA requests for abortion.
  355  *
  356  * CONTEXT:
  357  * spin_lock_irq(q->queue_lock)
  358  */
  359 void blk_abort_flushes(struct request_queue *q)
  360 {
  361         struct request *rq, *n;
  362         int i;
  363 
  364         /*
  365          * Requests in flight for data are already owned by the dispatch
  366          * queue or the device driver.  Just restore for normal completion.
  367          */
  368         list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
  369                 list_del_init(&rq->flush.list);
  370                 blk_flush_restore_request(rq);
  371         }
  372 
  373         /*
  374          * We need to give away requests on flush queues.  Restore for
  375          * normal completion and put them on the dispatch queue.
  376          */
  377         for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
  378                 list_for_each_entry_safe(rq, n, &q->flush_queue[i],
  379                                          flush.list) {
  380                         list_del_init(&rq->flush.list);
  381                         blk_flush_restore_request(rq);
  382                         list_add_tail(&rq->queuelist, &q->queue_head);
  383                 }
  384         }
  385 }
  386 
  387 static void bio_end_flush(struct bio *bio, int err)
  388 {
  389         if (err)
  390                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
  391         if (bio->bi_private)
  392                 complete(bio->bi_private);
  393         bio_put(bio);
  394 }
  395 
  396 /**
  397  * blkdev_issue_flush - queue a flush
  398  * @bdev:       blockdev to issue flush for
  399  * @gfp_mask:   memory allocation flags (for bio_alloc)
  400  * @error_sector:       optional; where to store the sector of a flush error
  401  *
  402  * Description:
  403  *    Issue a flush for the block device in question and wait for it to
  404  *    complete.  The caller can optionally supply room for storing the
  405  *    error offset in case of a flush error; the error sector is only
  406  *    meaningful if the underlying driver reports one.
  407  */
  408 int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
  409                 sector_t *error_sector)
  410 {
  411         DECLARE_COMPLETION_ONSTACK(wait);
  412         struct request_queue *q;
  413         struct bio *bio;
  414         int ret = 0;
  415 
  416         if (bdev->bd_disk == NULL)
  417                 return -ENXIO;
  418 
  419         q = bdev_get_queue(bdev);
  420         if (!q)
  421                 return -ENXIO;
  422 
  423         /*
  424          * some block devices may not have their queue correctly set up here
  425          * (e.g. loop device without a backing file) and so issuing a flush
  426          * here will panic. Ensure there is a request function before issuing
  427          * the flush.
  428          */
  429         if (!q->make_request_fn)
  430                 return -ENXIO;
  431 
  432         bio = bio_alloc(gfp_mask, 0);
  433         bio->bi_end_io = bio_end_flush;
  434         bio->bi_bdev = bdev;
  435         bio->bi_private = &wait;
  436 
  437         bio_get(bio);
  438         submit_bio(WRITE_FLUSH, bio);
  439         wait_for_completion(&wait);
  440 
  441         /*
  442          * The driver must store the error location in ->bi_sector, if
  443          * it supports it. For non-stacked drivers, this should be
  444          * copied from blk_rq_pos(rq).
  445          */
  446         if (error_sector)
  447                *error_sector = bio->bi_sector;
  448 
  449         if (!bio_flagged(bio, BIO_UPTODATE))
  450                 ret = -EIO;
  451 
  452         bio_put(bio);
  453         return ret;
  454 }
  455 EXPORT_SYMBOL(blkdev_issue_flush);
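
blkdev_issue_flush() is the exported interface the rest of the kernel uses when it needs the device's volatile write cache emptied (filesystem fsync paths are a typical caller). A minimal caller sketch, in kernel context; the helper name example_flush_cache() is hypothetical and @bdev is assumed to have been opened elsewhere:

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical helper: flush @bdev's write cache and report where it failed. */
static int example_flush_cache(struct block_device *bdev)
{
        sector_t bad_sector = 0;
        int err;

        /* May sleep: submits a flush bio and waits for its completion. */
        err = blkdev_issue_flush(bdev, GFP_KERNEL, &bad_sector);
        if (err)
                printk(KERN_WARNING "cache flush failed (%d) near sector %llu\n",
                       err, (unsigned long long)bad_sector);
        return err;
}
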
