/*
 * block/blk-lib.c - functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

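/*
 * A bio_batch ties a set of in-flight bios to one completion: "done"
 * counts outstanding bios plus a reference held by the submitter,
 * "error" latches the first meaningful failure reported by an end_io,
 * and "wait" is completed when the count drops to zero.
 */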
struct bio_batch {
	atomic_t		done;
	int			error;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio)
{
	struct bio_batch *bb = bio->bi_private;

	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
		bb->error = bio->bi_error;
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
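
		/*
		 * Worked example: with granularity 8 and alignment 2, a
		 * 13-sector request starting at sector 10 ends at sector
		 * 23; 23 % 8 == 7 != 2, so end_sect is rounded down to
		 * ((23 - 2) / 8) * 8 + 2 == 18 and req_sects becomes 8,
		 * leaving the next bio to start at the aligned sector 18.
		 */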

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
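
/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * discard every sector of an opened device or partition, as an
 * mkfs-style tool might. The "example_" helper is hypothetical.
 */
static int __maybe_unused example_discard_all(struct block_device *bdev)
{
	/* Hypothetical helper: size the range from the opened bdev. */
	sector_t nr_sects = bdev->bd_part ? bdev->bd_part->nr_sects
					  : get_capacity(bdev->bd_disk);

	/* Pass BLKDEV_DISCARD_SECURE instead of 0 to request secure erase. */
	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}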

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
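
/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * replicate one logical block of data across a whole range via WRITE
 * SAME, bailing out early when the queue lacks support. The "example_"
 * helper and its parameters are hypothetical.
 */
static int __maybe_unused example_write_same_fill(struct block_device *bdev,
						  sector_t sector,
						  sector_t nr_sects,
						  struct page *pattern)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* "pattern" must hold at least one logical block of payload. */
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_NOIO,
				       pattern);
}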

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */

static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

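		/*
		 * Pack as many zero pages as possible into this bio;
		 * bio_add_page() returns the number of bytes actually
		 * added, so a short return means the bio is full and
		 * must be submitted before continuing.
		 */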
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range. If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. If the
 *  discard request fails, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function falls back to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports the WRITE SAME command,
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
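
/*
 * Minimal usage sketch (illustrative, not part of the original file):
 * zero the first MiB of a device, letting blkdev_issue_zeroout() pick
 * discard, WRITE SAME, or plain zero writes. The "example_" helper is
 * hypothetical.
 */
static int __maybe_unused example_wipe_first_mib(struct block_device *bdev)
{
	/* 1 MiB expressed in 512-byte sectors, clamped to the device size. */
	sector_t nr_sects = min_t(sector_t, 1 << (20 - 9),
				  get_capacity(bdev->bd_disk));

	/* discard=true: try discard first when it reads back as zeroes. */
	return blkdev_issue_zeroout(bdev, 0, nr_sects, GFP_KERNEL, true);
}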