/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

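/*
 * Allocate a new bio; if @bio is non-NULL, chain it to the new bio and
 * submit it.  Callers build up a chain of bios this way and then submit
 * or wait on the last one themselves.
 */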
static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(rw, bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	int alignment;

	if (!q)
		return -ENXIO;
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
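		/*
		 * Illustrative example (small values for clarity; in practice
		 * the split only occurs when nr_sects exceeds UINT_MAX >> 9):
		 * with granularity = 8, alignment = 0, sector = 0,
		 * req_sects = 11 and nr_sects = 20, end_sect starts at 11,
		 * is rounded down to the aligned sector 8, req_sects becomes
		 * 8, and the remaining sectors are picked up on the next
		 * loop iteration.
		 */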
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, type, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
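
/*
 * Example (a hedged sketch, not from this file): a caller that discards
 * two ranges as one chain under a single plug and waits once at the end.
 * The sector ranges are illustrative.
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL,
 *				     REQ_WRITE | REQ_DISCARD, &bio);
 *	if (!ret)
 *		ret = __blkdev_issue_discard(bdev, 8192, 2048, GFP_KERNEL,
 *					     REQ_WRITE | REQ_DISCARD, &bio);
 *	if (!ret && bio)
 *		ret = submit_bio_wait(REQ_WRITE | REQ_DISCARD, bio);
 *	blk_finish_plug(&plug);
 */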

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (flags & BLKDEV_DISCARD_SECURE)
		type |= REQ_SECURE;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(type, bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
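
/*
 * Example (illustrative sketch): discarding a range from filesystem
 * code, with a secure discard if the caller asked for one.  "secure",
 * "start_sector" and "nr_sectors" are assumed caller-side values.
 *
 *	unsigned long flags = secure ? BLKDEV_DISCARD_SECURE : 0;
 *	int err = blkdev_issue_discard(bdev, start_sector, nr_sectors,
 *				       GFP_NOFS, flags);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */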

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
	}

	if (bio)
		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
	return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
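
/*
 * Example (illustrative sketch): using WRITE SAME to stamp a repeating
 * pattern across a range.  "pattern_page" is an assumed caller-side
 * page whose first logical block holds the pattern to repeat.
 *
 *	if (bdev_write_same(bdev)) {
 *		int err = blkdev_issue_write_same(bdev, start, nr_sectors,
 *						  GFP_KERNEL, pattern_page);
 *		if (err)
 *			return err;
 *	}
 */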

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio = NULL;
	unsigned int sz;

	while (nr_sects != 0) {
		bio = next_bio(bio, WRITE,
				min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			/*
			 * bio_add_page() returns the number of bytes it
			 * actually added, so stop filling this bio once
			 * it is full and chain a new one.
			 */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
	}

	if (bio)
		return submit_bio_wait(WRITE, bio);
	return 0;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @discard: whether to discard the block range
 *
 * Description:
 *    Zero-fill a block range.  If the discard flag is set and the block
 *    device guarantees that subsequent READ operations to the block range
 *    in question will return zeroes, the blocks will be discarded.  Should
 *    the discard request fail, if the discard flag is not set, or if
 *    discard_zeroes_data is not supported, this function will resort to
 *    zeroing the blocks manually, thus provisioning (allocating,
 *    anchoring) them.  If the block device supports the WRITE SAME command
 *    blkdev_issue_zeroout() will use it to optimize the process of
 *    clearing the block range.  Otherwise the zeroing will be performed
 *    using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
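
/*
 * Example (illustrative sketch): zeroing a range, preferring discard
 * when the device guarantees that discarded blocks read back as zeroes.
 * "start" and "nr_sectors" are assumed caller-side values.
 *
 *	int err = blkdev_issue_zeroout(bdev, start, nr_sectors,
 *				       GFP_KERNEL, true);
 *	if (err)
 *		return err;
 */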