// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Allocate a new bio with room for @nr_pages vectors.  If @bio is non-NULL,
 * chain it to the new bio and submit it, so callers can keep extending a
 * single chain and wait only on the final bio.
 */
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int op;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        while (nr_sects) {
                unsigned int req_sects = nr_sects;
                sector_t end_sect;

                if (!req_sects)
                        goto fail;
                if (req_sects > UINT_MAX >> 9)
                        req_sects = UINT_MAX >> 9;

                end_sect = sector + req_sects;

                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;

fail:
        if (bio) {
                submit_bio_wait(bio);
                bio_put(bio);
        }
        *biop = NULL;
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
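
/*
 * Illustrative sketch only, not part of this file: a typical caller (for
 * example an fstrim-style ioctl path) discards a range and treats a device
 * without discard support as a no-op.  The function name is hypothetical.
 */
static int __maybe_unused example_trim_range(struct block_device *bdev,
                sector_t sector, sector_t nr_sects)
{
        int ret;

        ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
        if (ret == -EOPNOTSUPP)
                ret = 0;        /* device cannot discard; nothing to do */
        return ret;
}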

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                                sector_t nr_sects, gfp_t gfp_mask,
                                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
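
/*
 * Illustrative sketch only, not part of this file: replicate one
 * logical-block-sized buffer across a range using WRITE SAME, assuming the
 * caller already owns @page.  The function name is hypothetical.
 */
static int __maybe_unused example_write_same_range(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, struct page *page)
{
        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;     /* no hardware WRITE SAME support */

        return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
                        page);
}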

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_PAGES);
}
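
/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE): there are
 * 4096 / 512 = 8 sectors per page, so 7 sectors round up to 1 page and
 * 24 sectors need 3 pages; larger requests are capped at BIO_MAX_PAGES.
 */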

static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        while (nr_sects != 0) {
                bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                                gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev: blockdev to issue against
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space will
 *    not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                        biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
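
/*
 * Illustrative sketch only, not part of this file: the *biop anchor lets a
 * caller chain work from several helpers into a single bio chain and wait
 * on it once.  Here the first @zero_sects sectors are zeroed and the
 * following @discard_sects sectors are discarded; the function name and the
 * split are hypothetical.
 */
static int __maybe_unused example_zero_then_discard(struct block_device *bdev,
                sector_t sector, sector_t zero_sects, sector_t discard_sects)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret, err;

        blk_start_plug(&plug);
        ret = __blkdev_issue_zeroout(bdev, sector, zero_sects, GFP_KERNEL,
                        &bio, 0);
        if (!ret)
                ret = __blkdev_issue_discard(bdev, sector + zero_sects,
                                discard_sects, GFP_KERNEL, 0, &bio);
        if (bio) {
                /* Wait for whatever was chained, even after a late error. */
                err = submit_bio_wait(bio);
                if (!ret)
                        ret = err;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}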

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
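
/*
 * Illustrative sketch only, not part of this file: zero a range while asking
 * the device not to release the underlying space (BLKDEV_ZERO_NOUNMAP) and
 * refusing the explicit page-writing fallback (BLKDEV_ZERO_NOFALLBACK), so
 * the call fails with -EOPNOTSUPP rather than issuing slow writes.  The
 * function name is hypothetical.
 */
static int __maybe_unused example_zeroout_offload_only(struct block_device *bdev,
                sector_t sector, sector_t nr_sects)
{
        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                        BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_NOFALLBACK);
}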