// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

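/*
 * Allocate a new bio, and chain-and-submit the previous one if present.
 * The helpers below call this repeatedly, feeding the returned bio back
 * in, so they build a single bio chain whose last member the top-level
 * callers can wait on with submit_bio_wait().
 */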
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = min_t(unsigned int, nr_sects,
				bio_allowed_max_sectors(q));

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
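
/*
 * Illustrative sketch (not part of this file): discard every sector of a
 * device, as a full-device mkfs might. The helper name is hypothetical,
 * and reading the size via i_size_read() on bd_inode is an assumption
 * consistent with this kernel's block ioctl code.
 */
static int example_discard_whole_bdev(struct block_device *bdev)
{
	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;

	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}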

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all write
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
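
/*
 * Illustrative sketch (not part of this file): replicate one logical
 * block of data across a range of sectors. The helper name and the
 * pattern fill are hypothetical; the device replicates the first
 * logical block of the page across the whole range.
 */
static int example_write_same_pattern(struct block_device *bdev,
				      sector_t sector, sector_t nr_sects,
				      u8 pattern)
{
	struct page *page = alloc_page(GFP_KERNEL);
	int ret;

	if (!page)
		return -ENOMEM;
	memset(page_address(page), pattern, PAGE_SIZE);
	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				      page);
	__free_page(page);
	return ret;
}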

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
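
/*
 * Worked example for the helper above, assuming 4 KiB pages (eight 512B
 * sectors per page): nr_sects = 1..8 maps to one page, nr_sects = 9 maps
 * to two, and anything beyond BIO_MAX_PAGES pages' worth of sectors is
 * clamped to BIO_MAX_PAGES.
 */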

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
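
/*
 * Illustrative sketch (not part of this file): callers of the *biop
 * variant typically batch the chained bios under a plug and wait on the
 * last one, as blkdev_issue_zeroout() below does with its sub-helpers.
 * The function name here is hypothetical.
 */
static int example_zeroout_sync(struct block_device *bdev, sector_t sector,
				sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, 0);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}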

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
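
/*
 * Illustrative call (not part of this file): zero a range but return
 * -EOPNOTSUPP instead of falling back to explicitly writing zero pages
 * when the device lacks a zeroing offload:
 *
 *	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 */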