/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
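
/*
 * Editor's note (added comment): allocate the next bio in a chain. If a
 * previous bio is passed in, it is chained to the new one and submitted,
 * so only the final bio of the chain needs to be waited on.
 */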
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
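
/**
 * __blkdev_issue_discard - queue a discard without waiting on it
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @op_flags:	extra request op flags (e.g. REQ_SECURE)
 * @biop:	in/out: last bio of the discard chain; the caller submits it
 *
 * Description:
 *    Editor's note (added kernel-doc): build and chain discard bios for
 *    the sectors in question. All but the last bio are submitted as the
 *    chain is built; the last is returned through @biop so the caller
 *    can batch more work before waiting on it.
 */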
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int op_flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((op_flags & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would
		 * be misaligned, stop the discard at the previous aligned
		 * sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
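
		/*
		 * Editor's note (worked example, not in the original): with
		 * granularity = 8 and alignment = 2, a chunk ending at sector
		 * 8388609 is clamped to end_sect = 8388602, since
		 * (8388602 - 2) % 8 == 0; the next chunk then starts at the
		 * device's discard alignment.
		 */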

		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_DISCARD, op_flags);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int op_flags = 0;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		op_flags |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, op_flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
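
/*
 * Example (editor's sketch, not part of the original file): a minimal,
 * hypothetical caller that discards a whole device. The function name and
 * the use of bd_part->nr_sects for the size are illustrative assumptions.
 */
static int __maybe_unused example_discard_all(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 0, bdev->bd_part->nr_sects,
				    GFP_KERNEL, 0);
}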

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;
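
	/*
	 * Editor's note (added comment): each bio carries a single bvec
	 * holding one logical block of payload; the device replicates that
	 * block across the whole bi_size range.
	 */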
	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio)
		ret = submit_bio_wait(bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
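
/*
 * Example (editor's sketch, not part of the original file): zero a
 * hypothetical 1 MiB range by replicating ZERO_PAGE(0) with WRITE SAME,
 * assuming the device advertises support via bdev_write_same().
 */
static int __maybe_unused example_write_same_zero(struct block_device *bdev,
						  sector_t sector)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_write_same(bdev, sector, 1 << (20 - 9),
				       GFP_KERNEL, ZERO_PAGE(0));
}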

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
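
		/*
		 * Editor's note (added comment): bio_add_page() returns the
		 * number of bytes actually added, so a short return means the
		 * bio is full and the outer loop must start a new one.
		 */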
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio)
		return submit_bio_wait(bio);
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range. If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded. Should
 *    the discard request fail, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them. If the block device supports the WRITE SAME command,
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range. Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
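
/*
 * Example (editor's sketch, not part of the original file): zero a range,
 * letting blkdev_issue_zeroout() pick discard, WRITE SAME, or plain
 * writes in that order of preference.
 */
static int __maybe_unused example_zero_range(struct block_device *bdev,
					     sector_t sector,
					     sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}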