// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
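
/*
 * Usage pattern (illustrative, not from the original file): callers feed the
 * previously returned bio back in, so a loop such as
 *
 *	bio = blk_next_bio(bio, 0, gfp_mask);
 *	bio->bi_iter.bi_sector = sector;
 *	...
 *
 * submits every bio except the last one; the final bio is handed back through
 * *biop or passed to submit_bio_wait() by the top-level helpers below.
 */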

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;
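
	/*
	 * Worked example: with a 4096-byte logical block size, bs_mask is
	 * (4096 >> 9) - 1 = 7, so sector = 8, nr_sects = 16 passes, while
	 * sector = 10 would be rejected with -EINVAL.
	 */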

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_part->start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA,
		 * - If no: set (granularity_aligned_lba - sector_mapped) to
		 *   bi_size of the first split bio, then the second bio will
		 *   start at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the split bios will very
		 *   probably be aligned to discard_granularity of the device's
		 *   queue.
		 */
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);
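
		/*
		 * Worked example: with discard_granularity = 1 MiB (2048
		 * sectors) and sector_mapped = 3000, round_up() yields
		 * granularity_aligned_lba = 4096, so the first bio covers at
		 * most 1096 sectors and the next bio starts at a granularity
		 * aligned LBA.
		 */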
		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
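
/*
 * Illustrative call (hypothetical caller, not part of this file): a
 * filesystem discarding a freed extent might do
 *
 *	ret = blkdev_issue_discard(sb->s_bdev, start, len, GFP_NOFS, 0);
 *
 * with start/len expressed in 512-byte sectors and aligned to the device's
 * logical block size, as checked above.
 */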

/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all point at
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
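
/*
 * Illustrative call (hypothetical caller): replicate one logical block of
 * pattern data held in @page across a range:
 *
 *	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL, page);
 *
 * The device must advertise WRITE SAME support (bdev_write_same() != 0),
 * otherwise the helper fails with -EOPNOTSUPP.
 */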

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
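
/*
 * Example (assuming PAGE_SIZE == 4096, i.e. 8 sectors per page):
 * nr_sects = 9 gives DIV_ROUND_UP(9, 8) = 2 pages, nr_sects = 1 still gives
 * 1 page, and very large ranges are clamped to BIO_MAX_PAGES.
 */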

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
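
/*
 * Note on the nested loops above: the inner loop packs ZERO_PAGE(0)
 * repeatedly into the current bio until bio_add_page() accepts less than a
 * full page (bi_size < sz), at which point the outer loop allocates and
 * chains the next bio. Zeroing 1 MiB with 4 KiB pages, for example, needs
 * 256 page additions, which fit in a single bio when BIO_MAX_PAGES is 256.
 */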

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space
 *    will not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
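
/*
 * Design note: __blkdev_issue_zeroout() prefers the WRITE ZEROES offload and
 * only falls back to writing ZERO_PAGE(0) when the device reports
 * -EOPNOTSUPP and the caller did not set BLKDEV_ZERO_NOFALLBACK.
 */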

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
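
/*
 * Illustrative call (hypothetical caller): zero a range while keeping the
 * blocks provisioned on a thinly provisioned device:
 *
 *	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOUNMAP);
 */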