block/blk-barrier.c

/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q: the request queue
 * @ordered: one of QUEUE_ORDERED_*
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function to indicate which ordering mode
 *   they support.
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
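
/*
 * Usage sketch (illustrative, not part of this file): a driver whose
 * device has a volatile write cache it can flush would typically pick
 * a drain+flush mode from its setup path, e.g.:
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 *
 * A write-through device with no cache to flush would pass
 * QUEUE_ORDERED_DRAIN instead, making the pre/post flush steps no-ops.
 */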

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
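
/*
 * Note on the bit trick above: ->ordseq accumulates completed stages
 * as a bitmask (QUEUE_ORDSEQ_STARTED, _DRAIN, _PREFLUSH, _BAR,
 * _POSTFLUSH, _DONE, in ascending bit order in this tree), so ffz()
 * yields the first stage that has not finished yet.  E.g. once
 * STARTED|DRAIN are set (ordseq == 0x03), ffz() returns 2 and the
 * current stage is 1 << 2 == QUEUE_ORDSEQ_PREFLUSH.
 */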

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock:
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}
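
/*
 * A note on the color test above: REQ_ORDERED_COLOR alternates with
 * each barrier, so an fs request carrying the same color as the
 * original barrier request was issued before that barrier and must be
 * drained first (DRAIN), while a different color means it arrived
 * after the barrier and belongs to the next epoch (DONE).
 */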

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}
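
/*
 * Note: only the first error seen in the sequence is kept in ->orderr;
 * the original barrier request is completed with that error once every
 * stage bit has been accumulated.  The return value tells the caller
 * whether the whole sequence just finished.
 */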

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
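
/*
 * Note: the pre- and post-flush requests built above are identical
 * empty REQ_HARDBARRIER|REQ_FLUSH requests; only the completion
 * callback differs, so each one advances a different stage of the
 * ordered sequence when it finishes.
 */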

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * An empty barrier on a write-through device with
		 * ordered tag has no command to issue, and without a
		 * command to issue, ordering by tag can't be used.
		 * Drain instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue the ordered sequence.  As we stack the requests at the
	 * head, we need to queue them in reverse order.  Note that we
	 * rely on the fact that no fs request uses
	 * ELEVATOR_INSERT_FRONT, and thus no fs request gets in between
	 * the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_WRITE;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If the whole sequence is
	 * complete, return false to tell the elevator that this
	 * request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
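
/*
 * Worked example: for QUEUE_ORDERED_DRAIN_FLUSH, start_ordered() does
 * its ELEVATOR_INSERT_FRONT insertions in reverse order (post-flush,
 * then bar, then pre-flush), so the dispatch queue ends up as
 *
 *	pre_flush_rq -> bar_rq -> post_flush_rq -> <rest of queue>
 *
 * and *rqp points at pre_flush_rq, or at NULL while in-flight
 * requests are still draining.
 */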

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
		(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
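
/*
 * Contract of blk_do_ordered(), as used by the dispatch path: it
 * returns false when the request is gone (queue ordering unsupported,
 * or the barrier sequence completed immediately); otherwise it returns
 * true and may have replaced *rqp with a proxy request, or with NULL
 * to hold dispatch until the ordered sequence catches up.
 */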

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 * @flags: BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *   Issue a flush for the block device in question.  The caller can
 *   supply room for storing the error offset in case of a flush
 *   error, if they wish to.  If the WAIT flag is not passed, the
 *   caller may only check that the request was pushed to some
 *   internal queue for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up
	 * here (e.g. a loop device without a backing file), and issuing
	 * a flush would then panic.  Ensure there is a request function
	 * before issuing the barrier.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in
		 * ->bi_sector, if it supports it.  For non-stacked
		 * drivers, this should be copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
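
/*
 * Caller sketch (illustrative, assumed context): a filesystem forcing
 * the device's write cache to stable storage and waiting for the
 * result would do something like
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL,
 *				 BLKDEV_IFL_WAIT);
 *
 * where BLKDEV_IFL_WAIT sets the bit tested via BLKDEV_WAIT above.
 * Without it the flush is fire-and-forget, and bio_end_empty_barrier()
 * simply drops the bio reference on completion.
 */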