/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
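
/*
 * Typical usage (illustrative, not part of the original file): a dm target
 * usually creates one client in its constructor and keeps it for the life
 * of the target, releasing it with dm_io_client_destroy() from the
 * destructor.  The per-client mempool and bioset are what guarantee
 * forward progress for that target's io under memory pressure.
 */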

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
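/*
 * Worked example (illustrative numbers): with DM_IO_MAX_REGIONS ==
 * BITS_PER_LONG == 64, 'struct io' is 64-byte aligned, so the low six
 * bits of its address are always zero and can carry a region index
 * in the range 0..63:
 *
 *	bi_private = (unsigned long)io | region;
 *	io         = bi_private & ~(unsigned long)63;
 *	region     = bi_private &  63;
 */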
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

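/*
 * Reference counting (summary of the code below): io->count starts at 1 in
 * sync_io()/async_io() and is incremented once per bio submitted from
 * do_region().  Each bio completion and the final dec_count(io, 0, 0) in
 * dispatch_io() drop one reference; whoever drops the last reference calls
 * complete_io(), which frees the io and invokes the caller's callback.
 */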
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	int error;

	if (bio->bi_error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_error;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
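
/*
 * Contract implemented by the helpers below: get_page() reports the current
 * page together with the offset into it and how many bytes remain in that
 * page from the offset; next_page() advances to the start of the following
 * page.  do_region() repeatedly calls get_page()/next_page() to fill bios,
 * so a dpages object behaves as a simple forward-only cursor over the
 * caller's memory.
 */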

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is OK to
	 * access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
	    special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If notify.fn is set, the IO is asynchronous: dm_io() returns once the
 * bios have been submitted, and notify.fn is invoked with the accumulated
 * error bits after every region has completed.  If notify.fn is NULL the
 * call is synchronous and does not return until all regions have completed;
 * the per-region error bits are then reported through *sync_error_bits.
 */
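/*
 * Illustrative sketch (not taken from this file): a synchronous read of
 * eight sectors into a kernel buffer might be set up roughly like this,
 * assuming 'client' came from dm_io_client_create() and 'data' points to
 * at least 8 << SECTOR_SHIFT bytes:
 *
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_op		= REQ_OP_READ,
 *		.bi_op_flags	= 0,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= data,
 *		.notify.fn	= NULL,		   (NULL means synchronous)
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 *
 * A non-NULL notify.fn makes the same call asynchronous, with notify.fn
 * and notify.context passed through to the completion callback.
 */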
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}