/*
 * Functions related to generic block layer helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

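/*
 * Allocate the next bio in a chain: if @bio is non-NULL, chain it to the
 * newly allocated bio and submit it, so that callers can keep extending
 * the chain and later wait on the final bio alone (the chain completes
 * as a unit). The returned bio becomes the caller's new anchor.
 */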
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
                gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, int flags,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int granularity;
        unsigned int op;
        int alignment;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (flags & BLKDEV_DISCARD_ZERO)
                        return -EOPNOTSUPP;
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                if ((flags & BLKDEV_DISCARD_ZERO) &&
                    !q->limits.discard_zeroes_data)
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

                /*
                 * If splitting a request, and the next starting sector would
                 * be misaligned, stop the discard at the previous aligned
                 * sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
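
                /*
                 * Worked example (hypothetical numbers): with granularity 8,
                 * alignment 2 and a split at end_sect = 15, 15 % 8 == 7 != 2,
                 * so end_sect is rounded down to (15 - 2) / 8 * 8 + 2 = 10.
                 * The next bio then starts at sector 10, which again sits at
                 * offset 2 within its granularity unit.
                 */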

                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
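
/*
 * Example (illustrative sketch, not part of the kernel sources): a caller
 * that wants to batch several discontiguous discards under one plug and
 * wait only once can reuse the anchor bio across calls; the ranges
 * (sec1/len1, sec2/len2) are hypothetical:
 *
 *      struct bio *bio = NULL;
 *      struct blk_plug plug;
 *      int ret;
 *
 *      blk_start_plug(&plug);
 *      ret = __blkdev_issue_discard(bdev, sec1, len1, GFP_KERNEL, 0, &bio);
 *      if (!ret)
 *              ret = __blkdev_issue_discard(bdev, sec2, len2, GFP_KERNEL, 0,
 *                              &bio);
 *      if (!ret && bio) {
 *              ret = submit_bio_wait(bio);
 *              bio_put(bio);
 *      }
 *      blk_finish_plug(&plug);
 *
 * blkdev_issue_discard() below is the single-range form of this pattern.
 */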

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
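
/*
 * Example (illustrative, with a hypothetical "start"): discard a freed 1MiB
 * extent, expressed as 2048 512-byte sectors, treating lack of discard
 * support as non-fatal:
 *
 *      int err = blkdev_issue_discard(bdev, start, 2048, GFP_KERNEL, 0);
 *      if (err && err != -EOPNOTSUPP)
 *              pr_warn("discard failed: %d\n", err);
 */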

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios, all pointing
 *    at the same @page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask,
                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
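
/*
 * Example (illustrative sketch; "pattern_page" is a hypothetical page the
 * caller has filled with one logical block worth of data): replicate that
 * block across a range without transferring it once per block:
 *
 *      err = blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL,
 *                      pattern_page);
 *
 * The device must advertise WRITE SAME support (bdev_write_same() != 0),
 * otherwise -EOPNOTSUPP is returned.
 */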

/**
 * __blkdev_issue_write_zeroes - generate a number of WRITE ZEROES bios
 * @bdev: blockdev to issue the request for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_ZEROES bios. These carry
 *    no data pages; the device itself zeroes the requested range.
 */
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, REQ_OP_WRITE_ZEROES, 0);

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue the request for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @discard: discard flag
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages. Tries
 *    WRITE ZEROES first and falls back to writing the shared zero page.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                bool discard)
{
        int ret;
        int bi_size = 0;
        struct bio *bio = *biop;
        unsigned int sz;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop);
        if (ret != -EOPNOTSUPP)
                goto out;

        ret = 0;
        while (nr_sects != 0) {
                bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
                                gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_bdev = bdev;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < (sz << 9))
                                break;
                }
                cond_resched();
        }

        *biop = bio;
out:
        return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If @discard is set and the block device
 *    guarantees that subsequent READs of the range return zeroes, the
 *    blocks are discarded. If the discard request fails, if @discard is
 *    not set, or if discard_zeroes_data is not supported, the function
 *    falls back to zeroing the blocks explicitly, thus provisioning
 *    (allocating, anchoring) them. If the device supports the WRITE
 *    ZEROES command, it is used to clear the range; otherwise the
 *    zeroing is performed with regular WRITEs of the zero page.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
        int ret;
        struct bio *bio = NULL;
        struct blk_plug plug;

        if (discard) {
                if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
                                BLKDEV_DISCARD_ZERO))
                        return 0;
        }

        blk_start_plug(&plug);
        ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
                        &bio, discard);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
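
/*
 * Example (illustrative, with a hypothetical "start"/"nr_sects" range):
 * zero a range before exposing it to an untrusted reader, preferring a
 * discard when the device guarantees discarded blocks read back as zero:
 *
 *      err = blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL, true);
 *
 * With @discard true the discard path is tried first (BLKDEV_DISCARD_ZERO);
 * on any failure the call transparently falls back to explicit zeroing.
 */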