/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io. Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages; /* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

 bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

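/*
 * Example (illustrative only, not part of the original file): a client
 * is typically created once at constructor time and reused for many
 * calls to dm_io().  "num_pages" is a hypothetical sizing chosen by
 * the caller.
 *
 *	struct dm_io_client *io_client;
 *
 *	io_client = dm_io_client_create(num_pages);
 *	if (IS_ERR(io_client))
 *		return PTR_ERR(io_client);
 *
 *	(... issue io through dm_io() ...)
 *
 *	dm_io_client_destroy(io_client);
 */
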
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

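/*
 * Illustrative sketch (not part of the original file): the minimal
 * contract a dpages provider must satisfy.  get_page() reports the
 * current page, the number of usable bytes starting at *offset, and
 * the offset itself; next_page() advances to the following page (a
 * no-op here, since a single page has no successor).  The one_page_*
 * names below are hypothetical.
 *
 *	static void one_page_get(struct dpages *dp, struct page **p,
 *				 unsigned long *len, unsigned *offset)
 *	{
 *		*p = dp->context_ptr;
 *		*len = PAGE_SIZE;
 *		*offset = 0;
 *	}
 *
 *	static void one_page_next(struct dpages *dp)
 *	{
 *	}
 *
 *	static void one_page_dp_init(struct dpages *dp, struct page *page)
 *	{
 *		dp->get_page = one_page_get;
 *		dp->next_page = one_page_next;
 *		dp->context_ptr = page;
 *	}
 */
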
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
				      num_bvecs);
		if (unlikely(num_bvecs > BIO_MAX_PAGES))
			num_bvecs = BIO_MAX_PAGES;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io.error_bits;

	return io.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO and
 * BIO_RW_UNPLUG bits in io_req->bi_rw (as dispatch_io() does for sync IO).
 * If you fail to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
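
/*
 * Example (illustrative only, not part of the original file): a
 * synchronous read of one 4KB region into kernel memory.  "client",
 * "bdev" and "data" are hypothetical caller state; "data" must point
 * at kernel (non-vmalloc) memory for DM_IO_KMEM.
 *
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = data,
 *		.notify.fn = NULL,
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 *
 * A NULL notify.fn selects the synchronous sync_io() path; setting
 * notify.fn and notify.context instead routes the request through
 * async_io(), which returns without waiting for completion.
 */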