git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - block/blk-lib.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helpers functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int op;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        while (nr_sects) {
                unsigned int req_sects = nr_sects;
                sector_t end_sect;

                end_sect = sector + req_sects;

                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:      blockdev to issue discard for
 * @sector:    start sector
 * @nr_sects:  number of sectors to discard
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @flags:     BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev:      target blockdev
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @page:      page containing data to write
 * @biop:      pointer to anchor bio
 *
 * Description:
 *    Generate and issue number of bios (REQ_OP_WRITE_SAME) with same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:      target blockdev
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @page:      page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                                sector_t nr_sects, gfp_t gfp_mask,
                                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_PAGES);
}
static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        while (nr_sects != 0) {
                bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                               gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}
/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:      blockdev to issue
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @biop:      pointer to anchor bio
 * @flags:     controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                                         biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
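/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * __blkdev_issue_zeroout() only builds and chains bios onto *biop; the
 * caller submits the final bio itself, typically under a plug, much like
 * blkdev_issue_zeroout() below does.  "my_bdev", "start_sector" and
 * "nr_sectors" are hypothetical caller-owned values.
 *
 *      struct bio *bio = NULL;
 *      struct blk_plug plug;
 *      int err;
 *
 *      blk_start_plug(&plug);
 *      err = __blkdev_issue_zeroout(my_bdev, start_sector, nr_sectors,
 *                                   GFP_KERNEL, &bio, 0);
 *      if (err == 0 && bio) {
 *              err = submit_bio_wait(bio);
 *              bio_put(bio);
 *      }
 *      blk_finish_plug(&plug);
 */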
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:      blockdev to write
 * @sector:    start sector
 * @nr_sects:  number of sectors to write
 * @gfp_mask:  memory allocation flags (for bio_alloc)
 * @flags:     controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                                  gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
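/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): zero a range while keeping the blocks provisioned.  If the caller
 * instead passes BLKDEV_ZERO_NOFALLBACK and the device lacks WRITE ZEROES
 * support, -EOPNOTSUPP is returned rather than falling back to writing
 * ZERO_PAGE data.  "my_bdev", "start_sector" and "nr_sectors" are
 * hypothetical caller-owned values.
 *
 *      int err = blkdev_issue_zeroout(my_bdev, start_sector, nr_sectors,
 *                                     GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
 */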