/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011	Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011	Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference. The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered. Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
 * completes, all the requests which were pending proceed to the next
 * step. This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress. This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete. The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
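
/*
 * Illustrative walk-through (a sketch, assuming a drive with a write-back
 * cache and no FUA support): a data-carrying REQ_PREFLUSH|REQ_FUA write
 * decomposes into PREFLUSH -> DATA -> POSTFLUSH. Its PREFLUSH waits on
 * fq->flush_queue[fq->flush_pending_idx] and is served by one shared
 * REQ_OP_FLUSH together with any other pending requests; after its DATA
 * finishes, its POSTFLUSH is again batched with whatever is pending at
 * that point.
 */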

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq);

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
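
/*
 * Worked example (derived from blk_flush_policy() above): on a queue with
 * QUEUE_FLAG_WC set and QUEUE_FLAG_FUA clear, a data-carrying
 * REQ_PREFLUSH | REQ_FUA write yields
 *
 *	policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH;
 *
 * on a FUA-capable queue the same request yields only
 * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA and REQ_FUA is passed to the driver.
 */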

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
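
/*
 * Worked example (derived from the helper above): once PREFLUSH and DATA
 * are done, rq->flush.seq == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA == 0x3, so
 * ffz(0x3) == 2 and the current step is 1 << 2 == REQ_FSEQ_POSTFLUSH.
 */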

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again. @rq->biotail is guaranteed to equal the
	 * original @rq->bio. Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		blk_mq_add_to_requeue_list(rq, add_front, true);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path. Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_request(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q, fq);
	return kicked | queued;
}

static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		/* release the tag's ownership to the req cloned from */
		spin_lock_irqsave(&fq->mq_flush_lock, flags);
		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, fq, seq, error);
	}

	/*
	 * Kick the queue to avoid stalls in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall
	 *    the queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 *    queue is held. Restart the queue after the flush request
	 *    finishes to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver. Always
	 * use kblockd.
	 */
	if (queued || fq->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	fq->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx. This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * Borrow the tag from the first request since they can't be in
	 * flight at the same time, and acquire the tag's ownership for
	 * the flush request.
	 */
	if (q->mq_ops) {
		struct blk_mq_hw_ctx *hctx;

		flush_rq->mq_ctx = first_rq->mq_ctx;
		flush_rq->tag = first_rq->tag;
		fq->orig_rq = first_rq;

		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(flush_rq, false);
}

static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	/*
	 * Update q->in_flight[] here to make this tag usable early,
	 * because in blk_queue_start_tag(), q->in_flight[BLK_RW_ASYNC]
	 * is used to limit async I/O and reserve tags for sync I/O.
	 *
	 * More importantly, this avoids the following I/O deadlock:
	 *
	 * - suppose there are 40 FUA requests coming to the flush queue
	 *   and the queue depth is 31
	 * - 30 rqs are scheduled, then blk_queue_start_tag() can't
	 *   allocate a tag for async I/O any more
	 * - all of the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
	 *   and flush_data_end_io() is called
	 * - the other rqs still can't go ahead without updating
	 *   q->in_flight[BLK_RW_ASYNC] here; meanwhile these rqs are held
	 *   in the flush data queue and no progress is made on handling
	 *   the post flush rq
	 * - only after the post flush rq is handled can all these rqs
	 *   be completed
	 */

	elv_completed_request(q, rq);

	/* for avoiding double accounting */
	rq->rq_flags &= ~RQF_STARTED;

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	hctx = blk_mq_map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_run_hw_queue(hctx, true);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted. Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done. Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache. In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_request(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery. Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops)
			blk_mq_sched_insert_request(rq, false, true, false, false);
		else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery. Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
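
/*
 * Worked example (derived from the code above): for a data-carrying write
 * that only needs a PREFLUSH, policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA,
 * so REQ_FSEQ_ACTIONS & ~policy == REQ_FSEQ_POSTFLUSH. The POSTFLUSH bit
 * is therefore marked complete up front and, once PREFLUSH and DATA have
 * finished, blk_flush_cur_seq() returns REQ_FSEQ_DONE and the request is
 * ended.
 */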

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. a loop device without a backing file), and issuing a flush
	 * in that case will panic. Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

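/*
 * Usage sketch (an illustration, assuming a hypothetical filesystem caller;
 * not taken from this file): after its metadata writes have completed, a
 * filesystem can make sure they reached non-volatile media with
 *
 *	int err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
 *	if (err)
 *		return err;
 *
 * Passing NULL for @error_sector means the caller does not need the
 * location of a failed flush.
 */
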
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
	if (!fq)
		goto fail;

	if (q->mq_ops)
		spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queues don't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}