drivers/block/brd.c
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include <linux/uio.h>
#endif

#include <linux/uaccess.h>

#define SECTOR_SHIFT            9
#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)

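/*
 * Illustration: assuming the common PAGE_SHIFT == 12 (4 KiB pages),
 * PAGE_SECTORS_SHIFT is 12 - 9 = 3 and PAGE_SECTORS is 1 << 3 = 8,
 * i.e. eight 512-byte sectors per backing page.
 */
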
/*
 * Each block ramdisk device has a radix tree, brd_pages, that stores the
 * pages holding the block device's contents. A brd page's ->index is its
 * offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
        int brd_number;

        struct request_queue *brd_queue;
        struct gendisk *brd_disk;
#ifdef CONFIG_BLK_DEV_RAM_DAX
        struct dax_device *dax_dev;
#endif
        struct list_head brd_list;

        /*
         * Backing store of pages and lock to protect it. This is the contents
         * of the block device.
         */
        spinlock_t brd_lock;
        struct radix_tree_root brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;

        /*
         * The page lifetime is protected by the fact that we have opened the
         * device node -- brd pages will never be deleted under us, so we
         * don't need any further locking or refcounting.
         *
         * This is strictly true for the radix-tree nodes as well (ie. we
         * don't actually need the rcu_read_lock()), however that is not a
         * documented feature of the radix-tree API so it is better to be
         * safe here (we don't have total exclusion from radix tree updates
         * here, only deletes).
         */
        rcu_read_lock();
        idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
        page = radix_tree_lookup(&brd->brd_pages, idx);
        rcu_read_unlock();

        BUG_ON(page && page->index != idx);

        return page;
}
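
/*
 * Worked example (illustrative, assuming 4 KiB pages): sector 17 maps to
 * page index 17 >> PAGE_SECTORS_SHIFT = 17 >> 3 = 2, and starts at byte
 * offset (17 & (PAGE_SECTORS - 1)) << SECTOR_SHIFT = 1 << 9 = 512 within
 * that page. The copy helpers below rely on exactly this arithmetic.
 */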

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert it, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;
        gfp_t gfp_flags;

        page = brd_lookup_page(brd, sector);
        if (page)
                return page;

        /*
         * Must use NOIO because we don't want to recurse back into the
         * block or filesystem layers from page reclaim.
         *
         * Cannot support DAX and highmem, because our ->direct_access
         * routine for DAX must return memory that is always addressable.
         * If DAX was reworked to use pfns and kmap throughout, this
         * restriction might be able to be lifted.
         */
        gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
        gfp_flags |= __GFP_HIGHMEM;
#endif
        page = alloc_page(gfp_flags);
        if (!page)
                return NULL;

        if (radix_tree_preload(GFP_NOIO)) {
                __free_page(page);
                return NULL;
        }

        spin_lock(&brd->brd_lock);
        idx = sector >> PAGE_SECTORS_SHIFT;
        page->index = idx;
        if (radix_tree_insert(&brd->brd_pages, idx, page)) {
                /* We lost an insertion race: free ours, use the winner's. */
                __free_page(page);
                page = radix_tree_lookup(&brd->brd_pages, idx);
                BUG_ON(!page);
                BUG_ON(page->index != idx);
        }
        spin_unlock(&brd->brd_lock);

        radix_tree_preload_end();

        return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
        unsigned long pos = 0;
        struct page *pages[FREE_BATCH];
        int nr_pages;

        do {
                int i;

                nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
                                (void **)pages, pos, FREE_BATCH);

                for (i = 0; i < nr_pages; i++) {
                        void *ret;

                        BUG_ON(pages[i]->index < pos);
                        pos = pages[i]->index;
                        ret = radix_tree_delete(&brd->brd_pages, pos);
                        BUG_ON(!ret || ret != pages[i]);
                        __free_page(pages[i]);
                }

                pos++;

                /*
                 * This assumes radix_tree_gang_lookup always returns as
                 * many pages as possible. If the radix-tree code changes,
                 * so will this have to.
                 */
        } while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        if (!brd_insert_page(brd, sector))
                return -ENOSPC;
        if (copy < n) {
                sector += copy >> SECTOR_SHIFT;
                if (!brd_insert_page(brd, sector))
                        return -ENOSPC;
        }
        return 0;
}
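
/*
 * Illustration (assuming 4 KiB pages): a 1024-byte write starting at
 * sector 7 begins at byte offset (7 & 7) << 9 = 3584 within its page, so
 * only 512 bytes fit there. Since copy < n, copy_to_brd_setup must also
 * ensure the page backing sector 8 exists before copy_to_brd runs.
 */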

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *dst;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        BUG_ON(!page);

        dst = kmap_atomic(page);
        memcpy(dst + offset, src, copy);
        kunmap_atomic(dst);

        if (copy < n) {
                src += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                BUG_ON(!page);

                dst = kmap_atomic(page);
                memcpy(dst, src, copy);
                kunmap_atomic(dst);
        }
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *src;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        if (page) {
                src = kmap_atomic(page);
                memcpy(dst, src + offset, copy);
                kunmap_atomic(src);
        } else
                memset(dst, 0, copy);

        if (copy < n) {
                dst += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                if (page) {
                        src = kmap_atomic(page);
                        memcpy(dst, src, copy);
                        kunmap_atomic(src);
                } else
                        memset(dst, 0, copy);
        }
}
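
/*
 * Note the hole semantics above: sectors whose backing pages were never
 * allocated read back as zeroes. For example, on a freshly created device
 * something like
 *
 *      dd if=/dev/ram0 bs=4096 count=1 | hexdump
 *
 * would be expected to show only zero bytes (illustrative command, not
 * part of the original source).
 */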

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
                        unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
{
        void *mem;
        int err = 0;

        if (is_write) {
                err = copy_to_brd_setup(brd, sector, len);
                if (err)
                        goto out;
        }

        mem = kmap_atomic(page);
        if (!is_write) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                copy_to_brd(brd, mem + off, sector, len);
        }
        kunmap_atomic(mem);

out:
        return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
        struct block_device *bdev = bio->bi_bdev;
        struct brd_device *brd = bdev->bd_disk->private_data;
        struct bio_vec bvec;
        sector_t sector;
        struct bvec_iter iter;

        sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
                goto io_error;

        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
                int err;

                err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
                                op_is_write(bio_op(bio)), sector);
                if (err)
                        goto io_error;
                sector += len >> SECTOR_SHIFT;
        }

        bio_endio(bio);
        return BLK_QC_T_NONE;
io_error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, bool is_write)
{
        struct brd_device *brd = bdev->bd_disk->private_data;
        int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
        page_endio(page, is_write, err);
        return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct page *page;

        if (!brd)
                return -ENODEV;
        /* Convert the page offset to a sector: 512 == 1 << SECTOR_SHIFT. */
        page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
        if (!page)
                return -ENOSPC;
        *kaddr = page_address(page);
        *pfn = page_to_pfn_t(page);

        /* Only one page is ensured per call, regardless of nr_pages. */
        return 1;
}

static long brd_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct brd_device *brd = dax_get_private(dax_dev);

        return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
}

static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static const struct dax_operations brd_dax_ops = {
        .direct_access = brd_dax_direct_access,
        .copy_from_iter = brd_dax_copy_from_iter,
};
#endif

static const struct block_device_operations brd_fops = {
        .owner = THIS_MODULE,
        .rw_page = brd_rw_page,
};

/*
 * And now the module's code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

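/*
 * Usage sketch (illustrative values): loading the module with
 *
 *      modprobe brd rd_nr=2 rd_size=8192
 *
 * creates /dev/ram0 and /dev/ram1, each 8192 KiB (8 MiB) in size.
 */
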
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
        rd_size = simple_strtol(str, NULL, 0);
        return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
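
/*
 * Example (illustrative): booting with "ramdisk_size=65536" on the kernel
 * command line sets rd_size to 65536 KiB, i.e. 64 MiB per RAM disk.
 */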

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (they should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
        struct brd_device *brd;
        struct gendisk *disk;

        brd = kzalloc(sizeof(*brd), GFP_KERNEL);
        if (!brd)
                goto out;
        brd->brd_number = i;
        spin_lock_init(&brd->brd_lock);
        INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

        brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
        if (!brd->brd_queue)
                goto out_free_dev;

        blk_queue_make_request(brd->brd_queue, brd_make_request);
        blk_queue_max_hw_sectors(brd->brd_queue, 1024);

        /*
         * This makes fdisk align partitions on 4k boundaries, because the
         * direct_access API returns a PFN and therefore needs 4k alignment.
         * (This only matters on very small devices <= 4M; otherwise fdisk
         * aligns on 1M. Either way the call is harmless.)
         */
        blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;
        disk->major = RAMDISK_MAJOR;
        disk->first_minor = i * max_part;
        disk->fops = &brd_fops;
        disk->private_data = brd;
        disk->queue = brd->brd_queue;
        disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "ram%d", i);
        /* rd_size is in KiB; capacity is counted in 512-byte sectors. */
        set_capacity(disk, rd_size * 2);

#ifdef CONFIG_BLK_DEV_RAM_DAX
        queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
        brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
        if (!brd->dax_dev)
                goto out_free_inode;
#endif

        return brd;

#ifdef CONFIG_BLK_DEV_RAM_DAX
out_free_inode:
        kill_dax(brd->dax_dev);
        put_dax(brd->dax_dev);
#endif
out_free_queue:
        blk_cleanup_queue(brd->brd_queue);
out_free_dev:
        kfree(brd);
out:
        return NULL;
}

static void brd_free(struct brd_device *brd)
{
        put_disk(brd->brd_disk);
        blk_cleanup_queue(brd->brd_queue);
        brd_free_pages(brd);
        kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
        struct brd_device *brd;

        *new = false;
        list_for_each_entry(brd, &brd_devices, brd_list) {
                if (brd->brd_number == i)
                        goto out;
        }

        brd = brd_alloc(i);
        if (brd) {
                add_disk(brd->brd_disk);
                list_add_tail(&brd->brd_list, &brd_devices);
        }
        *new = true;
out:
        return brd;
}

static void brd_del_one(struct brd_device *brd)
{
        list_del(&brd->brd_list);
#ifdef CONFIG_BLK_DEV_RAM_DAX
        kill_dax(brd->dax_dev);
        put_dax(brd->dax_dev);
#endif
        del_gendisk(brd->brd_disk);
        brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
        struct brd_device *brd;
        struct kobject *kobj;
        bool new;

        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
        kobj = brd ? get_disk(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);

        if (new)
                *part = 0;

        return kobj;
}

static int __init brd_init(void)
{
        struct brd_device *brd, *next;
        int i;

        /*
         * The brd module can instantiate the underlying device structure
         * on demand, provided a device node is accessed.
         *
         * (1) If rd_nr is specified, create that many devices upfront;
         *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
         * (2) Users can create further brd devices by making device nodes
         *     themselves and letting the kernel instantiate the actual
         *     device on demand. Example:
         *              mknod /path/devnod_name b 1 X   # 1 is the rd major
         *              fdisk -l /path/devnod_name
         *      If device (X / max_part) was not created already, it will
         *      be created dynamically.
         */

        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
                return -EIO;

        if (unlikely(!max_part))
                max_part = 1;

        for (i = 0; i < rd_nr; i++) {
                brd = brd_alloc(i);
                if (!brd)
                        goto out_free;
                list_add_tail(&brd->brd_list, &brd_devices);
        }

        /* point of no return */

        list_for_each_entry(brd, &brd_devices, brd_list)
                add_disk(brd->brd_disk);

        blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
                                  THIS_MODULE, brd_probe, NULL, NULL);

        pr_info("brd: module loaded\n");
        return 0;

out_free:
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
                list_del(&brd->brd_list);
                brd_free(brd);
        }
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module NOT loaded !!!\n");
        return -ENOMEM;
}

static void __exit brd_exit(void)
{
        struct brd_device *brd, *next;

        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);

        blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);