// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);
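/*
 * Illustrative sketch (not part of the original file): callers typically use
 * blk_next_bio() in a loop, so each call chains and submits the previous bio
 * and the caller waits only on the last one.  The helper name, chunk size and
 * op below are hypothetical, chosen only to show the chaining pattern.
 */
#if 0
static int example_chain_and_wait(struct block_device *bdev, sector_t sector,
                                  unsigned int nr_chunks, gfp_t gfp)
{
        struct bio *bio = NULL;
        unsigned int i;
        int ret;

        for (i = 0; i < nr_chunks; i++) {
                /* Chains and submits the previous bio, if any. */
                bio = blk_next_bio(bio, 0, gfp);
                bio->bi_iter.bi_sector = sector + i * 8;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                bio->bi_iter.bi_size = 8 << 9;
        }

        /* Wait on the tail; earlier bios complete into this one. */
        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
#endif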
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int op;
        sector_t bs_mask, part_offset = 0;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        /* In case the discard granularity isn't set by a buggy device driver */
        if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
                char dev_name[BDEVNAME_SIZE];

                bdevname(bdev, dev_name);
                pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
                return -EOPNOTSUPP;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!nr_sects)
                return -EINVAL;

        /* In case the discard request is in a partition */
        if (bdev_is_partition(bdev))
                part_offset = bdev->bd_start_sect;

        while (nr_sects) {
                sector_t granularity_aligned_lba, req_sects;
                sector_t sector_mapped = sector + part_offset;

                granularity_aligned_lba = round_up(sector_mapped,
                                q->limits.discard_granularity >> SECTOR_SHIFT);

                /*
                 * Check whether the discard bio starts at a discard_granularity
                 * aligned LBA:
                 * - If no: set (granularity_aligned_lba - sector_mapped) as the
                 *   bi_size of the first split bio, so that the second bio
                 *   starts at a discard_granularity aligned LBA on the device.
                 * - If yes: use bio_aligned_discard_max_sectors() as the max
                 *   possible bi_size of the first split bio. Then when this bio
                 *   is split in the device driver, the split bios will very
                 *   likely be aligned to the discard_granularity of the
                 *   device's queue.
                 */
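                /*
                 * Worked example (hypothetical numbers, added for
                 * illustration): with a discard_granularity of 4096 bytes
                 * (8 sectors) and sector_mapped == 10, round_up() yields
                 * granularity_aligned_lba == 16, so the first bio covers only
                 * sectors 10..15 (req_sects == 6) and the next iteration
                 * starts at the aligned LBA 16.
                 */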
                if (granularity_aligned_lba == sector_mapped)
                        req_sects = min_t(sector_t, nr_sects,
                                          bio_aligned_discard_max_sectors(q));
                else
                        req_sects = min_t(sector_t, nr_sects,
                                          granularity_aligned_lba - sector_mapped);

                WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

                bio = blk_next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                sector += req_sects;
                nr_sects -= req_sects;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
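/*
 * Illustrative sketch (not part of the original file): the *biop anchor lets
 * a caller batch several discard ranges under one plug and wait once.  The
 * helper name and range values are hypothetical.
 */
#if 0
static int example_discard_two_ranges(struct block_device *bdev)
{
        struct blk_plug plug;
        struct bio *bio = NULL;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, 0, 1024, GFP_KERNEL, 0, &bio);
        if (!ret)
                ret = __blkdev_issue_discard(bdev, 4096, 1024, GFP_KERNEL, 0,
                                             &bio);
        /* All bios are chained; waiting on the last one waits for all. */
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
#endif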
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
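/*
 * Illustrative sketch (not part of the original file): a simple synchronous
 * caller.  The helper name and range are hypothetical; only the
 * blkdev_issue_discard() call itself is real API.
 */
#if 0
static int example_discard_range(struct block_device *bdev)
{
        /* Discard 1 MiB (2048 sectors of 512 bytes) starting at sector 0. */
        return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
}
#endif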
/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = bio_allowed_max_sectors(q);

        while (nr_sects) {
                bio = blk_next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                                sector_t nr_sects, gfp_t gfp_mask,
                                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
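/*
 * Illustrative sketch (not part of the original file): replicating one
 * logical block of data across a range with WRITE SAME.  The helper name,
 * page source and range are hypothetical.
 */
#if 0
static int example_write_same(struct block_device *bdev, struct page *page)
{
        /* Replicate the page's first logical block across 2048 sectors. */
        return blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL, page);
}
#endif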
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = blk_next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_VECS);
}
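/*
 * Worked example (hypothetical numbers, added for illustration): with 4 KiB
 * pages there are 8 sectors per page, so nr_sects == 20 rounds up to 3 pages
 * and nr_sects == 3 still yields 1 page; very large ranges are clamped to
 * BIO_MAX_VECS pages per bio.
 */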
static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        while (nr_sects != 0) {
                bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                                   gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                                         biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
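/*
 * Illustrative sketch (not part of the original file): probing for a zeroing
 * offload with BLKDEV_ZERO_NOFALLBACK; -EOPNOTSUPP tells the caller to zero
 * by other means.  The helper name and range are hypothetical.
 */
#if 0
static int example_try_offloaded_zeroing(struct block_device *bdev)
{
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, &bio,
                                     BLKDEV_ZERO_NOFALLBACK);
        if (ret)
                return ret;     /* -EOPNOTSUPP: no WRITE ZEROES offload */
        if (bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        return ret;
}
#endif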
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                                  gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
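/*
 * Illustrative sketch (not part of the original file): zeroing a range while
 * keeping provisioned space allocated.  The helper name and range values are
 * hypothetical.
 */
#if 0
static int example_zero_range(struct block_device *bdev)
{
        /* Zero 1 MiB at sector 0; BLKDEV_ZERO_NOUNMAP keeps blocks allocated. */
        return blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL,
                                    BLKDEV_ZERO_NOUNMAP);
}
#endif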