]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - block/blk-lib.c
blkdev: check for valid request queue before issuing flush
[mirror_ubuntu-zesty-kernel.git] / block / blk-lib.c
CommitLineData
f31e7e40
DM
1/*
2 * Functions related to generic helpers functions
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/scatterlist.h>
9
10#include "blk.h"
11
12static void blkdev_discard_end_io(struct bio *bio, int err)
13{
14 if (err) {
15 if (err == -EOPNOTSUPP)
16 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
17 clear_bit(BIO_UPTODATE, &bio->bi_flags);
18 }
19
20 if (bio->bi_private)
21 complete(bio->bi_private);
f31e7e40
DM
22
23 bio_put(bio);
24}
25
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.  The range is
 *    split into chunks no larger than the queue's max_discard_sectors.
 *    With BLKDEV_IFL_WAIT each chunk is waited for before the next is
 *    issued; the return value reflects the last chunk's status.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	/* BLKDEV_IFL_BARRIER selects an ordered (barrier) discard request */
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		/*
		 * Cap each chunk at UINT_MAX >> 9 sectors so the byte count
		 * stored in bi_size (unsigned int) cannot overflow.
		 * NOTE(review): if max_discard_sectors is 0 while the discard
		 * queue flag is set, nr_sects never shrinks and this loop
		 * spins submitting zero-length bios — confirm the queue
		 * always advertises a non-zero limit here.
		 */
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		/* only hand the end_io a completion if the caller will wait */
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/* extra reference so the bio survives until we read its flags */
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		/*
		 * NOTE(review): without BLKDEV_IFL_WAIT these flag checks
		 * race with the still-in-flight bio — presumably callers
		 * only trust the return value when they pass the WAIT flag;
		 * verify against the call sites.
		 */
		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
3f14d792
DM
94
/*
 * Tracks a batch of in-flight bios so the submitter can wait for all of
 * them to finish and collect a combined completion status.
 */
struct bio_batch
{
	atomic_t done;		/* number of bios completed so far */
	unsigned long flags;	/* BIO_UPTODATE / BIO_EOPNOTSUPP status bits */
	struct completion *wait;	/* signalled once per completed bio */
	bio_end_io_t *end_io;	/* optional per-bio callback, may be NULL */
};
102
103static void bio_batch_end_io(struct bio *bio, int err)
104{
105 struct bio_batch *bb = bio->bi_private;
0341aafb 106
3f14d792
DM
107 if (err) {
108 if (err == -EOPNOTSUPP)
109 set_bit(BIO_EOPNOTSUPP, &bb->flags);
110 else
111 clear_bit(BIO_UPTODATE, &bb->flags);
112 }
113 if (bb) {
114 if (bb->end_io)
115 bb->end_io(bio, err);
116 atomic_inc(&bb->done);
117 complete(bb->wait);
118 }
119 bio_put(bio);
120}
121
/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 *    Send a barrier at the beginning and at the end if requested; this
 *    guarantees correct request ordering.  An empty barrier allows us to
 *    avoid a post-queue flush.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	/* assume success until a completion clears the UPTODATE bit */
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	while (nr_sects != 0) {
		/*
		 * NOTE(review): on bio_alloc failure we break with ret still
		 * 0 and nr_sects != 0, which (after waiting) jumps back to
		 * "submit" and retries forever — confirm this cannot livelock
		 * when gfp_mask does not allow the allocator to make progress.
		 */
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		/* only track the bio in the batch if the caller will wait */
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

		/* pack as many zero pages into this bio as it will take */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			/* bio_add_page returns bytes accepted, possibly short */
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight, send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);


	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in-flight; end_io completes once per bio */
		while ( issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* bio_alloc failed above: retry the remaining range */
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);