/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
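
/*
 * Illustrative example (not part of the original comment): on a device
 * that advertises a writeback cache but no FUA, a REQ_PREFLUSH|REQ_FUA
 * write expands to the full PREFLUSH -> DATA -> POSTFLUSH sequence; on a
 * FUA-capable device it becomes PREFLUSH -> DATA with REQ_FUA passed
 * through to the hardware; and on a device without a writeback cache it
 * is executed as a plain write.
 */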

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);

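/*
 * Work out which REQ_FSEQ_* steps @rq needs, based on whether it carries
 * data and on the queue's writeback cache (QUEUE_FLAG_WC) and FUA
 * (QUEUE_FLAG_FUA) capabilities.
 */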
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

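/*
 * Put @rq on the requeue list for dispatch and kick the list; @add_front
 * selects insertion at the head.
 */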
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}

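/*
 * Illustrative walk-through of the double buffering described at the top
 * of this file: two REQ_PREFLUSH writes queue up on
 * fq->flush_queue[pending_idx]; blk_kick_flush() issues one shared
 * REQ_OP_FLUSH for both and toggles pending_idx, so a later third request
 * queues on the other list while the first flush is still in flight,
 * preserving C1.
 */
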
/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 *  spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

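/*
 * Completion handler for the flush request itself: return the borrowed
 * tag, flip flush_running_idx to retire the batch that was in flight, and
 * advance every request that was waiting on this flush to its next
 * sequence step.
 */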
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	} else {
		blk_mq_put_driver_tag_hctx(hctx, flush_rq);
		flush_rq->internal_tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	fq->flush_queue_delayed = 0;
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 *  spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress.  Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * When no I/O scheduler is in use, borrow the tag from the first
	 * request since the two can't be in flight at the same time, and
	 * take over the tag's ownership for the flush request.
	 *
	 * With an I/O scheduler, the flush request only borrows the
	 * scheduler tag of the first request so that driver-tag put/get
	 * stays consistent.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		fq->orig_rq = first_rq;
		flush_rq->tag = first_rq->tag;
		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
	} else {
		flush_rq->internal_tag = first_rq->internal_tag;
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}

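/*
 * Completion handler installed on the DATA step of a sequenced request:
 * record that DATA finished and kick the flush machinery.  The request
 * itself is not ended here; see the "completed twice" note at the top of
 * this file.
 */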
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag_hctx(hctx, rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() when dispatching a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before
	 * issuing the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);

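/*
 * Allocate and initialize a blk_flush_queue.  @cmd_size is the driver's
 * per-request payload, which is appended to the preallocated flush
 * request and rounded up with it to a cache-line multiple.
 */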
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
					      int node, int cmd_size, gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio-based request queue has no flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}