]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - block/blk-lib.c
cfq-iosched: fix broken cfq_ref_get_cfqf() for CONFIG_BLK_CGROUP=y && CFQ_GROUP_IOSCHED=n
[mirror_ubuntu-zesty-kernel.git] / block / blk-lib.c
CommitLineData
f31e7e40
DM
1/*
2 * Functions related to generic helpers functions
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/bio.h>
7#include <linux/blkdev.h>
8#include <linux/scatterlist.h>
9
10#include "blk.h"
11
12static void blkdev_discard_end_io(struct bio *bio, int err)
13{
14 if (err) {
15 if (err == -EOPNOTSUPP)
16 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
17 clear_bit(BIO_UPTODATE, &bio->bi_flags);
18 }
19
20 if (bio->bi_private)
21 complete(bio->bi_private);
22 __free_page(bio_page(bio));
23
24 bio_put(bio);
25}
26
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.  Large ranges
 *    are split into multiple bios, each clamped to the queue's
 *    max_discard_sectors limit.  Returns 0 on success, -ENXIO if the
 *    device has no queue, -EOPNOTSUPP if the queue does not support
 *    discard (or the device rejected it), -EIO on device error, or
 *    -ENOMEM on allocation failure.  Chunks already issued before a
 *    failure are not undone.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	/* BLKDEV_IFL_BARRIER selects an ordered (barrier) discard */
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* One bio per max_discard_sectors-sized chunk of the range. */
	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		/*
		 * Clamp to UINT_MAX >> 9 sectors so the byte count
		 * computed below (sectors << 9) cannot overflow the
		 * unsigned int bi_size.
		 */
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		/* end_io only completes &wait when bi_private is set */
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;

		/*
		 * Add a zeroed one-sector payload as that's what
		 * our current implementations need. If we'll ever need
		 * more the interface will need revisiting.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;

		/*
		 * And override the bio size - the way discard works we
		 * touch many more blocks on disk than the actual payload
		 * length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		/*
		 * Take an extra reference so the bio (and its flags)
		 * survive end_io for the checks below.
		 */
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		/*
		 * NOTE(review): without BLKDEV_IFL_WAIT the flags are
		 * inspected while the bio may still be in flight, so the
		 * result can be stale -- confirm callers that care about
		 * the return value always pass BLKDEV_IFL_WAIT.
		 */
		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
3f14d792
DM
115
/*
 * Shared state for a batch of bios issued together, letting the
 * submitter wait for all of them and collect an aggregate status.
 */
struct bio_batch
{
	atomic_t		done;	/* count of completed bios */
	unsigned long		flags;	/* aggregate BIO_UPTODATE / BIO_EOPNOTSUPP bits */
	struct completion	*wait;	/* completed once per finished bio */
	bio_end_io_t		*end_io;	/* optional per-bio hook, may be NULL */
};
123
124static void bio_batch_end_io(struct bio *bio, int err)
125{
126 struct bio_batch *bb = bio->bi_private;
127 if (err) {
128 if (err == -EOPNOTSUPP)
129 set_bit(BIO_EOPNOTSUPP, &bb->flags);
130 else
131 clear_bit(BIO_UPTODATE, &bb->flags);
132 }
133 if (bb) {
134 if (bb->end_io)
135 bb->end_io(bio, err);
136 atomic_inc(&bb->done);
137 complete(bb->wait);
138 }
139 bio_put(bio);
140}
141
/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages (backed
 *    by ZERO_PAGE(0)).  Send a barrier at the beginning and at the end
 *    if requested; this guarantees correct request ordering, and the
 *    empty barrier lets us avoid a post-queue flush.  Returns 0 on
 *    success or a negative errno.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* Start "all OK"; bio_batch_end_io clears BIO_UPTODATE on error. */
	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;

	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	/* One bio per chunk of up to BIO_MAX_PAGES zero pages. */
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		/*
		 * bi_private is only set when waiting, so the end_io
		 * sees a NULL batch otherwise.
		 * NOTE(review): without BLKDEV_IFL_WAIT an I/O error is
		 * never recorded in bb.flags, so the BIO_UPTODATE test
		 * below cannot fire -- confirm that is intended.
		 */
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;

		/* Pack as many zero pages into the bio as it will take. */
		while(nr_sects != 0) {
			sz = min(PAGE_SIZE >> 9 , nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			/* bio_add_page returns the byte count actually added */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight. Send final barrier if requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);


	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in-flight: one completion per finished bio */
		while ( issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	/* bio_alloc failed mid-range: retry issuing the remainder. */
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);