/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

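/*
 * Allocate the next bio in a chain: if @bio is non-NULL it is chained
 * to the new bio and submitted, so callers can keep extending a single
 * stream of I/O and wait only on the last bio in the chain.
 */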
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
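		/*
		 * Worked example (illustrative): with granularity 8 and
		 * alignment 2, an end_sect of 103 gives 103 % 8 == 7 != 2,
		 * so it is pulled back to (103 - 2) / 8 * 8 + 2 == 98, the
		 * largest sector <= 103 congruent to the alignment modulo
		 * the granularity.
		 */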
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_rw = type;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
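
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * such as device-mapper can batch several discard ranges into one
 * chained bio stream and wait once; the ranges below are hypothetical.
 */
#if 0
static int example_discard_two_ranges(struct block_device *bdev)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_discard(bdev, 0, 8, GFP_KERNEL,
				     REQ_WRITE | REQ_DISCARD, &bio);
	if (!ret)
		ret = __blkdev_issue_discard(bdev, 1024, 8, GFP_KERNEL,
					     REQ_WRITE | REQ_DISCARD, &bio);
	if (!ret && bio)
		ret = submit_bio_wait(bio);
	return ret;
}
#endif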

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		type |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
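
/*
 * Illustrative usage sketch (not part of the original file): discard
 * 2048 sectors (1 MiB with 512-byte sectors) at a hypothetical offset,
 * asking for a secure discard.
 */
#if 0
static int example_secure_discard(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL,
				    BLKDEV_DISCARD_SECURE);
}
#endif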

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio->bi_rw = REQ_WRITE | REQ_WRITE_SAME;

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio)
		ret = submit_bio_wait(bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
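
/*
 * Illustrative usage sketch (not part of the original file): replicate
 * one logical block of caller-supplied pattern data across 2048
 * sectors starting at sector 0.
 */
#if 0
static int example_write_same(struct block_device *bdev,
			      struct page *pattern)
{
	return blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL, pattern);
}
#endif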

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */

static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_rw = REQ_WRITE;

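		/*
		 * bio_add_page() may add less than requested once the bio
		 * is full; in that case break out so the outer loop can
		 * start a new chained bio for the remaining sectors.
		 */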
		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio)
		return submit_bio_wait(bio);
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded.  Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them.  If the block device supports the WRITE SAME command,
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range.  Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
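
/*
 * Illustrative usage sketch (not part of the original file): zero a
 * 2048-sector range, letting the block layer discard instead when the
 * device guarantees that discarded blocks read back as zeroes.
 */
#if 0
static int example_zeroout(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, true);
}
#endif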