/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

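/*
 * Typical client lifecycle, sketched for illustration only (the surrounding
 * target code and its error handling are assumptions, not part of this file):
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	issue io with dm_io(), passing 'client' in each request
 *	...
 *	dm_io_client_destroy(client);
 */
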
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
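
/*
 * For example, with BITS_PER_LONG == 64, DM_IO_MAX_REGIONS is 64, so each
 * 'struct io' is 64-byte aligned and the six low bits of its address are
 * zero.  That leaves exactly enough room to OR in a region number in the
 * range 0..DM_IO_MAX_REGIONS-1 and recover both values later:
 *
 *	bi_private = (unsigned long)io | region;
 *	io         = bi_private & ~(unsigned long)(DM_IO_MAX_REGIONS - 1);
 *	region     = bi_private & (DM_IO_MAX_REGIONS - 1);
 */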

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

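/*
 * Every consumer drives a dpages provider the same way (see do_region()):
 * get_page() returns the current page plus the usable length and offset
 * within it, the caller consumes some or all of that range, then
 * next_page() advances to the following page.  Each *_dp_init() below
 * wires up those two callbacks for a different source of memory.
 */
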
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
	     op == REQ_OP_WRITE_SAME) &&
	    special_cmd_max_sectors == 0) {
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized-bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		case REQ_OP_WRITE_SAME:
			num_bvecs = 1;
			break;
		default:
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

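/*
 * Issue the op to each of the num_regions regions in turn, rewinding the
 * dpages iterator before every region so that each region sees the same
 * data.  The caller's initial io->count reference is dropped at the end so
 * the io cannot complete before every region has been dispatched.
 */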
static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

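/*
 * Synchronous requests are layered on the asynchronous path: sync_io()
 * points io->callback at sync_io_complete(), which records the error bits
 * and signals a completion that sync_io() then waits on.
 */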
struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
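
/*
 * Illustrative sketch only, not part of this interface: a caller that has
 * already created a dm_io_client ('client') and holds a block device
 * ('bdev') could issue a synchronous read of 'nr_sectors' sectors into a
 * kernel buffer 'buf' roughly like this.  All four names are assumptions
 * supplied by the caller; leaving notify.fn NULL selects the synchronous
 * path.
 *
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= nr_sectors,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_op		= REQ_OP_READ,
 *		.bi_op_flags	= 0,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buf,
 *		.notify.fn	= NULL,
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 */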

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}