/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int granularity;
        enum req_op op;
        int alignment;

        if (!q)
                return -ENXIO;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

                /*
                 * If splitting a request, and the next starting sector would
                 * be misaligned, stop the discard at the previous aligned
                 * sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
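                /*
                 * Worked example (illustrative numbers, not from the
                 * original source): with granularity = 1024 sectors and
                 * alignment = 8, an end_sect of 13000 is misaligned
                 * (13000 % 1024 == 712, not 8).  The code above trims it
                 * to ((13000 - 8) / 1024) * 1024 + 8 = 12296, which does
                 * satisfy 12296 % 1024 == 8, so the next iteration starts
                 * on an aligned boundary.
                 */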

                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
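
/*
 * Example: a minimal sketch of a caller that batches a discard chain with
 * __blkdev_issue_discard() and submits it asynchronously, completing via a
 * custom bi_end_io instead of submit_bio_wait().  The example_* names are
 * hypothetical and not part of this file.
 */
static void example_discard_end_io(struct bio *bio)
{
        /* Completion may run in interrupt context; just drop our reference. */
        bio_put(bio);
}

static int __maybe_unused example_async_discard(struct block_device *bdev,
                                                sector_t sector,
                                                sector_t nr_sects)
{
        struct bio *bio = NULL;
        int ret;

        ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 0,
                                     &bio);
        if (!ret && bio) {
                bio->bi_end_io = example_discard_end_io;
                submit_bio(bio);
        }
        return ret;
}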

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
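
/*
 * Example: a minimal sketch of a caller that discards a 1 MiB range
 * (2048 sectors of 512 bytes) starting at a given sector and waits for
 * completion.  The example_* name is hypothetical.
 */
static int __maybe_unused example_discard_range(struct block_device *bdev,
                                                sector_t sector)
{
        return blkdev_issue_discard(bdev, sector, 2048, GFP_KERNEL, 0);
}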

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = NULL;
        int ret = 0;

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
        }

        if (bio)
                ret = submit_bio_wait(bio);
        return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
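
/*
 * Example: a minimal sketch of zeroing 8192 sectors via WRITE SAME with the
 * shared zero page, after checking that the device supports the command
 * (blkdev_issue_zeroout() below performs the same check).  The example_*
 * name is hypothetical.
 */
static int __maybe_unused example_write_same_zero(struct block_device *bdev,
                                                  sector_t sector)
{
        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;
        return blkdev_issue_write_same(bdev, sector, 8192, GFP_KERNEL,
                                       ZERO_PAGE(0));
}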

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                                  sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio = NULL;
        unsigned int sz;

        while (nr_sects != 0) {
                bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
                                gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                /* Fill the bio with up to BIO_MAX_PAGES zero pages. */
                while (nr_sects != 0) {
                        sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
        }

        if (bio)
                return submit_bio_wait(bio);
        return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range.  If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded.  Should
 *    the discard request fail, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them.  If the block device supports the WRITE SAME command
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range.  Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
            blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
                return 0;

        if (bdev_write_same(bdev) &&
            blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                    ZERO_PAGE(0)) == 0)
                return 0;

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
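
/*
 * Example: a minimal sketch of a caller that zeroes a range, letting
 * blkdev_issue_zeroout() pick discard, WRITE SAME, or plain zero-page
 * writes in that order of preference.  The example_* name is hypothetical.
 */
static int __maybe_unused example_zero_range(struct block_device *bdev,
                                             sector_t sector,
                                             sector_t nr_sects)
{
        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}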