// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011  Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011  Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference. The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered. Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
 * completes, all the requests which were pending proceed to the next
 * step. This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress. This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete. The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
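
/*
 * For example, a data-carrying write submitted with REQ_PREFLUSH|REQ_FUA
 * decomposes as follows under the rules above:
 *
 *	no writeback cache:	DATA only
 *	writeback cache + FUA:	PREFLUSH + DATA (REQ_FUA passed through)
 *	writeback cache, !FUA:	PREFLUSH + DATA + POSTFLUSH
 */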

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
#include <linux/lockdep.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
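
/*
 * For example, a data-carrying request with REQ_PREFLUSH|REQ_FUA set on a
 * queue with QUEUE_FLAG_WC but not QUEUE_FLAG_FUA gets the full policy
 * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, while the same
 * request on a FUA-capable queue gets only REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA.
 */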

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
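
/*
 * rq->flush.seq accumulates completed steps as a bitmask, so the lowest
 * clear bit is the next step to execute; steps a request doesn't need are
 * pre-marked as done by blk_insert_flush() and thus skipped here.
 */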

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again. @rq->biotail is guaranteed to equal the
	 * original @rq->bio. Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

static void blk_account_io_flush(struct request *rq)
{
	struct hd_struct *part = &rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path. Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}
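
/*
 * Illustrative walkthrough: a PREFLUSH+FUA write on a writeback-cache
 * device without FUA steps through PREFLUSH -> DATA -> POSTFLUSH -> DONE,
 * with blk_flush_complete_seq() invoked once per transition: first from
 * blk_insert_flush(), then from flush_end_io() or mq_flush_data_end_io()
 * as each step completes.
 */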

static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	blk_account_io_flush(flush_rq);

	/* release tag ownership back to the request it was borrowed from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
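
/*
 * Note: the refcount_dec_and_test() above covers the case where another
 * context (e.g. the timeout path iterating busy tags) still holds a
 * reference to flush_rq; completion is then deferred until the final
 * reference is dropped, with the status parked in fq->rq_status meanwhile.
 */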

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx. This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * With no I/O scheduler ("none"), borrow the tag from the first
	 * request, since the two can't be in flight at the same time, and
	 * take over the tag's ownership for the flush request.
	 *
	 * With an I/O scheduler, the flush request needs to borrow the
	 * scheduler tag just to satisfy the put/get driver tag pairing.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		flush_rq->tag = first_rq->tag;

		/*
		 * We borrow the data request's driver tag, so we have to
		 * mark this flush request as INFLIGHT to avoid double
		 * accounting of the tag.
		 */
		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
	} else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}

static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall. Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted. Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done. Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache. In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery. Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false, false);
		return;
	}

	/*
	 * @rq should go through flush machinery. Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;
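
	/*
	 * Mark the steps this request doesn't need (REQ_FSEQ_ACTIONS &
	 * ~policy) as already complete, so sequencing starts at the first
	 * step the request actually requires.
	 */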
	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
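
/*
 * Typical usage (illustrative): a caller forces the device cache to
 * stable storage with
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL);
 *
 * which blocks until the empty REQ_PREFLUSH bio completes.
 */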

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	lockdep_register_key(&fq->key);
	lockdep_set_class(&fq->mq_flush_lock, &fq->key);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}
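
/*
 * Note: fq->flush_rq lives outside the normal tag set and carries the
 * driver's per-command payload (@cmd_size) inline, which is why
 * blk_kick_flush() borrows a tag from the first pending request rather
 * than allocating one of its own.
 */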

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio-based request queue has no flush queue */
	if (!fq)
		return;

	lockdep_unregister_key(&fq->key);
	kfree(fq->flush_rq);
	kfree(fq);
}