/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is written, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flushes is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
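
/*
 * For context: which of the PREFLUSH/DATA/POSTFLUSH steps apply to a given
 * request is driven by the QUEUE_FLAG_WC and QUEUE_FLAG_FUA queue flags,
 * tested in blk_flush_policy() below.  A driver typically advertises its
 * cache capabilities at probe time via blk_queue_write_cache(); an
 * illustrative sketch (not part of this file):
 *
 *	blk_queue_write_cache(q, true, true);	writeback cache + native FUA
 *	blk_queue_write_cache(q, true, false);	writeback cache, FUA emulated
 *						via POSTFLUSH
 *	blk_queue_write_cache(q, false, false);	no writeback cache, empty
 *						flushes complete immediately
 */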

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, unsigned int flags);

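/*
 * Compute which REQ_FSEQ_* steps @rq needs: DATA if the request carries
 * data, PREFLUSH/POSTFLUSH depending on REQ_PREFLUSH/REQ_FUA and on whether
 * the queue advertises a writeback cache (QUEUE_FLAG_WC) and native FUA
 * support (QUEUE_FLAG_FUA).
 */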
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

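/*
 * Return the next step of @rq's flush sequence: the lowest REQ_FSEQ_* bit
 * that hasn't been recorded in rq->flush.seq yet.
 */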
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

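/*
 * (Re)queue @rq for dispatch through the blk-mq requeue list, optionally at
 * the front, and kick the requeue work so it is picked up promptly.
 */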
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
        blk_mq_add_to_requeue_list(rq, add_front, true);
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        unsigned int cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}

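/*
 * Completion handler for the flush request itself.  Return ownership of the
 * borrowed tag, retire the in-flight flush by toggling flush_running_idx,
 * and advance every request that was waiting on this flush to the next step
 * of its sequence.
 */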
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
        struct blk_mq_hw_ctx *hctx;

        /* release the tag's ownership back to the request it was cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        hctx = blk_mq_map_queue(q, flush_rq->cmd_flags, flush_rq->mq_ctx->cpu);
        if (!q->elevator) {
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        } else {
                blk_mq_put_driver_tag_hctx(hctx, flush_rq);
                flush_rq->internal_tag = -1;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        fq->flush_queue_delayed = 0;
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush-related state of @q has changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           unsigned int flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;
        struct blk_mq_hw_ctx *hctx;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3
         *
         * For blk-mq + scheduling, we can risk having all driver tags
         * assigned to empty flushes, and we deadlock if we are expecting
         * other requests to make progress.  Don't defer for that case.
         */
        if (!list_empty(&fq->flush_data_in_flight) &&
            !(q->mq_ops && q->elevator) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * When no I/O scheduler is attached, borrow the tag from the first
         * request since the two can't be in flight at the same time, and
         * take over the tag's ownership for the flush request.
         *
         * When an I/O scheduler is attached, the flush request only needs
         * to borrow the scheduler tag so that the driver tag put/get
         * accounting stays balanced.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;

        if (!q->elevator) {
                fq->orig_rq = first_rq;
                flush_rq->tag = first_rq->tag;
                hctx = blk_mq_map_queue(q, first_rq->cmd_flags,
                                        first_rq->mq_ctx->cpu);
                blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
        } else {
                flush_rq->internal_tag = first_rq->internal_tag;
        }

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;

        blk_flush_queue_rq(flush_rq, false);
}

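/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA request.
 * Record the step as done (which may kick off a POSTFLUSH or finish the
 * sequence) and rerun the hardware queue so requests queued behind the
 * flush machinery keep making progress.
 */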
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag_hctx(hctx, rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall.
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_run_hw_queue(hctx, true);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from the blk-mq insertion/dispatch path when a PREFLUSH/FUA
 * request is submitted.  Analyze what needs to be done and put @rq on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                blk_mq_end_request(rq, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                blk_mq_request_bypass_insert(rq, false);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

        rq->end_io = mq_flush_data_end_io;

        spin_lock_irq(&fq->mq_flush_lock);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
        spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: if non-NULL, filled with the sector of the first error,
 *	when the driver reports it
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * Some block devices may not have their queue correctly set up here
         * (e.g. loop device without a backing file) and so issuing a flush
         * here will panic.  Ensure there is a request function before issuing
         * the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        ret = submit_bio_wait(bio);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it.  For non-stacked drivers, this should be
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_iter.bi_sector;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
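
/*
 * Typical call site (an illustrative sketch, not part of this file): a
 * filesystem or stacking driver that only wants the device cache flushed
 * and doesn't care about the error sector can pass a NULL @error_sector:
 *
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *	if (err)
 *		pr_warn("cache flush failed: %d\n", err);
 */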
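/*
 * Allocate and initialize a blk_flush_queue: the two pending/running lists,
 * the data-in-flight list, and the preallocated flush request, sized so the
 * driver's per-command payload (@cmd_size) fits behind struct request.
 */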
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size, gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio-based request queues have no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}