/*
 * fs/logfs/dev_bdev.c - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

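/*
 * Perform synchronous single-page I/O against @bdev.  The bio and its
 * one bio_vec live on the stack; the target sector is derived from the
 * page index and submit_bio_wait() blocks until completion.
 */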
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);

	return submit_bio_wait(rw, &bio);
}

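/*
 * Read one page synchronously and update its Uptodate/Error flags to
 * match the result before unlocking it.  Also used as the filler_t
 * callback for read_cache_page() below.
 */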
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

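/* Waiters are woken once the last pending write completes. */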
static DECLARE_WAIT_QUEUE_HEAD(wq);

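/*
 * Completion handler for __bdev_writeseg().  Walk the bio_vec array
 * backwards, end writeback on each page and drop the reference taken
 * by find_lock_page(), then wake bdev_sync() waiters once no writes
 * remain pending.
 */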
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

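/*
 * Write @nr_pages pages of the mapping inode to the device, starting
 * at byte offset @ofs (page index @index).  The block layer cannot
 * split bios, so once the per-device vector limit is reached the
 * current bio is submitted and a fresh one is allocated for the rest.
 */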
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

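/* Align @ofs and @len to page boundaries before writing the range out. */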
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fits perfectly into a
		 * segment, the segment gets written out by a sync and is
		 * subsequently closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

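/*
 * Completion handler for do_erase().  Only the shared erase page is
 * involved, so no per-page cleanup is needed.
 */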
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

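/*
 * Block devices cannot erase, so emulate it by writing the contents of
 * super->s_erase_page over every page in the range.  The bio handling
 * mirrors __bdev_writeseg().
 */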
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}

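/*
 * Erase @len bytes at offset @to; both must be page-aligned.  The
 * write-out only happens when @ensure_write is set, for the reason
 * given in the comment below.
 */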
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required. Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}

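/* Wait until all asynchronous writes submitted above have completed. */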
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

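/* The first superblock candidate occupies the first page of the device. */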
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

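/*
 * The last superblock candidate occupies the last full 4KiB block of
 * the device.
 */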
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

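/* Drop the exclusive reference taken in logfs_get_sb_bdev(). */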
static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

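/* Writing the write buffer in place is never possible on block devices. */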
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}

static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};

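/*
 * Open @devname exclusively.  mtdblock devices are handed over to the
 * MTD backend via logfs_get_sb_mtd(); everything else is driven
 * through bd_devops above.
 */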
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}