fs/block_dev.c (mirror_ubuntu-zesty-kernel.git): "Fix a crash when block device is read and block size is changed at the same time"
1 /*
2 * linux/fs/block_dev.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
6 */
7
8 #include <linux/init.h>
9 #include <linux/mm.h>
10 #include <linux/fcntl.h>
11 #include <linux/slab.h>
12 #include <linux/kmod.h>
13 #include <linux/major.h>
14 #include <linux/device_cgroup.h>
15 #include <linux/highmem.h>
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/blkpg.h>
19 #include <linux/magic.h>
20 #include <linux/buffer_head.h>
21 #include <linux/swap.h>
22 #include <linux/pagevec.h>
23 #include <linux/writeback.h>
24 #include <linux/mpage.h>
25 #include <linux/mount.h>
26 #include <linux/uio.h>
27 #include <linux/namei.h>
28 #include <linux/log2.h>
29 #include <linux/cleancache.h>
30 #include <asm/uaccess.h>
31 #include "internal.h"
32
33 struct bdev_inode {
34 struct block_device bdev;
35 struct inode vfs_inode;
36 };
37
38 static const struct address_space_operations def_blk_aops;
39
40 static inline struct bdev_inode *BDEV_I(struct inode *inode)
41 {
42 return container_of(inode, struct bdev_inode, vfs_inode);
43 }
44
45 inline struct block_device *I_BDEV(struct inode *inode)
46 {
47 return &BDEV_I(inode)->bdev;
48 }
49 EXPORT_SYMBOL(I_BDEV);
50
51 /*
52 * Move the inode from its current bdi to a new bdi. If the inode is dirty we
53 * need to move it onto the dirty list of @dst so that the inode is always on
54 * the right list.
55 */
56 static void bdev_inode_switch_bdi(struct inode *inode,
57 struct backing_dev_info *dst)
58 {
59 struct backing_dev_info *old = inode->i_data.backing_dev_info;
60
61 if (unlikely(dst == old)) /* deadlock avoidance */
62 return;
63 bdi_lock_two(&old->wb, &dst->wb);
64 spin_lock(&inode->i_lock);
65 inode->i_data.backing_dev_info = dst;
66 if (inode->i_state & I_DIRTY)
67 list_move(&inode->i_wb_list, &dst->wb.b_dirty);
68 spin_unlock(&inode->i_lock);
69 spin_unlock(&old->wb.list_lock);
70 spin_unlock(&dst->wb.list_lock);
71 }
72
73 sector_t blkdev_max_block(struct block_device *bdev)
74 {
75 sector_t retval = ~((sector_t)0);
76 loff_t sz = i_size_read(bdev->bd_inode);
77
78 if (sz) {
79 unsigned int size = block_size(bdev);
80 unsigned int sizebits = blksize_bits(size);
81 retval = (sz >> sizebits);
82 }
83 return retval;
84 }
85
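/*
 * Worked example (illustrative): for a device whose bd_inode reports
 * i_size = 1 GiB (1073741824 bytes) and whose current block size is
 * 4096 bytes, blksize_bits(4096) == 12, so blkdev_max_block() returns
 * 1073741824 >> 12 == 262144. Block indices 0..262143 are then valid,
 * and blkdev_get_block() below treats any iblock >= 262144 as beyond
 * EOF.
 */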
86 /* Kill _all_ buffers and pagecache, dirty or not... */
87 void kill_bdev(struct block_device *bdev)
88 {
89 struct address_space *mapping = bdev->bd_inode->i_mapping;
90
91 if (mapping->nrpages == 0)
92 return;
93
94 invalidate_bh_lrus();
95 truncate_inode_pages(mapping, 0);
96 }
97 EXPORT_SYMBOL(kill_bdev);
98
99 /* Invalidate clean unused buffers and pagecache. */
100 void invalidate_bdev(struct block_device *bdev)
101 {
102 struct address_space *mapping = bdev->bd_inode->i_mapping;
103
104 if (mapping->nrpages == 0)
105 return;
106
107 invalidate_bh_lrus();
108 lru_add_drain_all(); /* make sure all lru add caches are flushed */
109 invalidate_mapping_pages(mapping, 0, -1);
110 /* 99% of the time, we don't need to flush the cleancache on the bdev.
111 * But, for the strange corners, let's be cautious
112 */
113 cleancache_invalidate_inode(mapping);
114 }
115 EXPORT_SYMBOL(invalidate_bdev);
116
117 int set_blocksize(struct block_device *bdev, int size)
118 {
119 struct address_space *mapping;
120
121 /* Size must be a power of two, and between 512 and PAGE_SIZE */
122 if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
123 return -EINVAL;
124
125 /* Size cannot be smaller than the size supported by the device */
126 if (size < bdev_logical_block_size(bdev))
127 return -EINVAL;
128
129 /* Prevent starting I/O or mapping the device */
130 down_write(&bdev->bd_block_size_semaphore);
131
132 /* Check that the block device is not memory mapped */
133 mapping = bdev->bd_inode->i_mapping;
134 mutex_lock(&mapping->i_mmap_mutex);
135 if (!prio_tree_empty(&mapping->i_mmap) ||
136 !list_empty(&mapping->i_mmap_nonlinear)) {
137 mutex_unlock(&mapping->i_mmap_mutex);
138 up_write(&bdev->bd_block_size_semaphore);
139 return -EBUSY;
140 }
141 mutex_unlock(&mapping->i_mmap_mutex);
142
143 /* Don't change the size if it is the same as the current size */
144 if (bdev->bd_block_size != size) {
145 sync_blockdev(bdev);
146 bdev->bd_block_size = size;
147 bdev->bd_inode->i_blkbits = blksize_bits(size);
148 kill_bdev(bdev);
149 }
150
151 up_write(&bdev->bd_block_size_semaphore);
152
153 return 0;
154 }
155
156 EXPORT_SYMBOL(set_blocksize);
157
158 int sb_set_blocksize(struct super_block *sb, int size)
159 {
160 if (set_blocksize(sb->s_bdev, size))
161 return 0;
162 /* If we get here, we know size is a power of two
163 * and its value is between 512 and PAGE_SIZE */
164 sb->s_blocksize = size;
165 sb->s_blocksize_bits = blksize_bits(size);
166 return sb->s_blocksize;
167 }
168
169 EXPORT_SYMBOL(sb_set_blocksize);
170
171 int sb_min_blocksize(struct super_block *sb, int size)
172 {
173 int minsize = bdev_logical_block_size(sb->s_bdev);
174 if (size < minsize)
175 size = minsize;
176 return sb_set_blocksize(sb, size);
177 }
178
179 EXPORT_SYMBOL(sb_min_blocksize);
180
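/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * fill_super typically picks its block size with sb_min_blocksize()
 * and must check the result, since sb_set_blocksize() returns 0 when
 * set_blocksize() rejects the value (device memory-mapped, or size
 * below the device's logical block size). examplefs_fill_super is a
 * hypothetical name.
 *
 *	static int examplefs_fill_super(struct super_block *sb,
 *					void *data, int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		... read the on-disk superblock with sb_bread() etc. ...
 *	}
 */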
181 static int
182 blkdev_get_block(struct inode *inode, sector_t iblock,
183 struct buffer_head *bh, int create)
184 {
185 if (iblock >= blkdev_max_block(I_BDEV(inode))) {
186 if (create)
187 return -EIO;
188
189 /*
190 * For reads, we're just trying to fill a partial page.
191 * Return a hole; the caller will have to call get_block
192 * again before it can fill the page, and it will get -EIO
193 * at that time.
194 */
195 return 0;
196 }
197 bh->b_bdev = I_BDEV(inode);
198 bh->b_blocknr = iblock;
199 set_buffer_mapped(bh);
200 return 0;
201 }
202
203 static int
204 blkdev_get_blocks(struct inode *inode, sector_t iblock,
205 struct buffer_head *bh, int create)
206 {
207 sector_t end_block = blkdev_max_block(I_BDEV(inode));
208 unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
209
210 if ((iblock + max_blocks) > end_block) {
211 max_blocks = end_block - iblock;
212 if ((long)max_blocks <= 0) {
213 if (create)
214 return -EIO; /* write fully beyond EOF */
215 /*
216 * It is a read which is fully beyond EOF. We return
217 * a !buffer_mapped buffer
218 */
219 max_blocks = 0;
220 }
221 }
222
223 bh->b_bdev = I_BDEV(inode);
224 bh->b_blocknr = iblock;
225 bh->b_size = max_blocks << inode->i_blkbits;
226 if (max_blocks)
227 set_buffer_mapped(bh);
228 return 0;
229 }
230
231 static ssize_t
232 blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
233 loff_t offset, unsigned long nr_segs)
234 {
235 struct file *file = iocb->ki_filp;
236 struct inode *inode = file->f_mapping->host;
237
238 return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
239 nr_segs, blkdev_get_blocks, NULL, NULL, 0);
240 }
241
242 int __sync_blockdev(struct block_device *bdev, int wait)
243 {
244 if (!bdev)
245 return 0;
246 if (!wait)
247 return filemap_flush(bdev->bd_inode->i_mapping);
248 return filemap_write_and_wait(bdev->bd_inode->i_mapping);
249 }
250
251 /*
252 * Write out and wait upon all the dirty data associated with a block
253 * device via its mapping. Does not take the superblock lock.
254 */
255 int sync_blockdev(struct block_device *bdev)
256 {
257 return __sync_blockdev(bdev, 1);
258 }
259 EXPORT_SYMBOL(sync_blockdev);
260
261 /*
262 * Write out and wait upon all dirty data associated with this
263 * device. Filesystem data as well as the underlying block
264 * device. Takes the superblock lock.
265 */
266 int fsync_bdev(struct block_device *bdev)
267 {
268 struct super_block *sb = get_super(bdev);
269 if (sb) {
270 int res = sync_filesystem(sb);
271 drop_super(sb);
272 return res;
273 }
274 return sync_blockdev(bdev);
275 }
276 EXPORT_SYMBOL(fsync_bdev);
277
278 /**
279 * freeze_bdev -- lock a filesystem and force it into a consistent state
280 * @bdev: blockdevice to lock
281 *
282 * If a superblock is found on this device, we take the s_umount semaphore
283 * on it to make sure nobody unmounts until the snapshot creation is done.
284 * The reference counter (bd_fsfreeze_count) guarantees that only the last
285 * unfreeze process can actually unfreeze the frozen filesystem when multiple
286 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
287 * down in thaw_bdev(). When it reaches 0, thaw_bdev() actually
288 * unfreezes the filesystem.
289 */
290 struct super_block *freeze_bdev(struct block_device *bdev)
291 {
292 struct super_block *sb;
293 int error = 0;
294
295 mutex_lock(&bdev->bd_fsfreeze_mutex);
296 if (++bdev->bd_fsfreeze_count > 1) {
297 /*
298 * We don't even need to grab a reference - the first call
299 * to freeze_bdev grabs an active reference and only the last
300 * thaw_bdev drops it.
301 */
302 sb = get_super(bdev);
303 drop_super(sb);
304 mutex_unlock(&bdev->bd_fsfreeze_mutex);
305 return sb;
306 }
307
308 sb = get_active_super(bdev);
309 if (!sb)
310 goto out;
311 error = freeze_super(sb);
312 if (error) {
313 deactivate_super(sb);
314 bdev->bd_fsfreeze_count--;
315 mutex_unlock(&bdev->bd_fsfreeze_mutex);
316 return ERR_PTR(error);
317 }
318 deactivate_super(sb);
319 out:
320 sync_blockdev(bdev);
321 mutex_unlock(&bdev->bd_fsfreeze_mutex);
322 return sb; /* thaw_bdev releases s->s_umount */
323 }
324 EXPORT_SYMBOL(freeze_bdev);
325
326 /**
327 * thaw_bdev -- unlock filesystem
328 * @bdev: blockdevice to unlock
329 * @sb: associated superblock
330 *
331 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
332 */
333 int thaw_bdev(struct block_device *bdev, struct super_block *sb)
334 {
335 int error = -EINVAL;
336
337 mutex_lock(&bdev->bd_fsfreeze_mutex);
338 if (!bdev->bd_fsfreeze_count)
339 goto out;
340
341 error = 0;
342 if (--bdev->bd_fsfreeze_count > 0)
343 goto out;
344
345 if (!sb)
346 goto out;
347
348 error = thaw_super(sb);
349 if (error) {
350 bdev->bd_fsfreeze_count++;
351 mutex_unlock(&bdev->bd_fsfreeze_mutex);
352 return error;
353 }
354 out:
355 mutex_unlock(&bdev->bd_fsfreeze_mutex);
356 return 0;
357 }
358 EXPORT_SYMBOL(thaw_bdev);
359
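/*
 * Usage sketch (illustrative, not part of this file): freeze_bdev()
 * and thaw_bdev() bracket an operation that needs an on-disk
 * consistent image, such as taking a snapshot. take_snapshot() is a
 * hypothetical helper.
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 *
 * Note that freeze_bdev() returns NULL when no filesystem is mounted
 * on the device; IS_ERR(NULL) is false and thaw_bdev() accepts a NULL
 * @sb, so the bare-device case falls through the same path.
 */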
360 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
361 {
362 return block_write_full_page(page, blkdev_get_block, wbc);
363 }
364
365 static int blkdev_readpage(struct file * file, struct page * page)
366 {
367 return block_read_full_page(page, blkdev_get_block);
368 }
369
370 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
371 loff_t pos, unsigned len, unsigned flags,
372 struct page **pagep, void **fsdata)
373 {
374 return block_write_begin(mapping, pos, len, flags, pagep,
375 blkdev_get_block);
376 }
377
378 static int blkdev_write_end(struct file *file, struct address_space *mapping,
379 loff_t pos, unsigned len, unsigned copied,
380 struct page *page, void *fsdata)
381 {
382 int ret;
383 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
384
385 unlock_page(page);
386 page_cache_release(page);
387
388 return ret;
389 }
390
391 /*
392 * private llseek:
393 * for a block special file file->f_path.dentry->d_inode->i_size is zero
394 * so we compute the size by hand (just as in block_read/write above)
395 */
396 static loff_t block_llseek(struct file *file, loff_t offset, int origin)
397 {
398 struct inode *bd_inode = file->f_mapping->host;
399 loff_t size;
400 loff_t retval;
401
402 mutex_lock(&bd_inode->i_mutex);
403 size = i_size_read(bd_inode);
404
405 retval = -EINVAL;
406 switch (origin) {
407 case SEEK_END:
408 offset += size;
409 break;
410 case SEEK_CUR:
411 offset += file->f_pos;
412 case SEEK_SET:
413 break;
414 default:
415 goto out;
416 }
417 if (offset >= 0 && offset <= size) {
418 if (offset != file->f_pos) {
419 file->f_pos = offset;
420 }
421 retval = offset;
422 }
423 out:
424 mutex_unlock(&bd_inode->i_mutex);
425 return retval;
426 }
427
428 int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
429 {
430 struct inode *bd_inode = filp->f_mapping->host;
431 struct block_device *bdev = I_BDEV(bd_inode);
432 int error;
433
434 error = filemap_write_and_wait_range(filp->f_mapping, start, end);
435 if (error)
436 return error;
437
438 /*
439 * There is no need to serialise calls to blkdev_issue_flush with
440 * i_mutex and doing so causes performance issues with concurrent
441 * O_SYNC writers to a block device.
442 */
443 error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
444 if (error == -EOPNOTSUPP)
445 error = 0;
446
447 return error;
448 }
449 EXPORT_SYMBOL(blkdev_fsync);
450
451 /*
452 * pseudo-fs
453 */
454
455 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
456 static struct kmem_cache * bdev_cachep __read_mostly;
457
458 static struct inode *bdev_alloc_inode(struct super_block *sb)
459 {
460 struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
461 if (!ei)
462 return NULL;
463 return &ei->vfs_inode;
464 }
465
466 static void bdev_i_callback(struct rcu_head *head)
467 {
468 struct inode *inode = container_of(head, struct inode, i_rcu);
469 struct bdev_inode *bdi = BDEV_I(inode);
470
471 kmem_cache_free(bdev_cachep, bdi);
472 }
473
474 static void bdev_destroy_inode(struct inode *inode)
475 {
476 call_rcu(&inode->i_rcu, bdev_i_callback);
477 }
478
479 static void init_once(void *foo)
480 {
481 struct bdev_inode *ei = (struct bdev_inode *) foo;
482 struct block_device *bdev = &ei->bdev;
483
484 memset(bdev, 0, sizeof(*bdev));
485 mutex_init(&bdev->bd_mutex);
486 INIT_LIST_HEAD(&bdev->bd_inodes);
487 INIT_LIST_HEAD(&bdev->bd_list);
488 #ifdef CONFIG_SYSFS
489 INIT_LIST_HEAD(&bdev->bd_holder_disks);
490 #endif
491 inode_init_once(&ei->vfs_inode);
492 /* Initialize mutex for freeze. */
493 mutex_init(&bdev->bd_fsfreeze_mutex);
494 init_rwsem(&bdev->bd_block_size_semaphore);
495 }
496
497 static inline void __bd_forget(struct inode *inode)
498 {
499 list_del_init(&inode->i_devices);
500 inode->i_bdev = NULL;
501 inode->i_mapping = &inode->i_data;
502 }
503
504 static void bdev_evict_inode(struct inode *inode)
505 {
506 struct block_device *bdev = &BDEV_I(inode)->bdev;
507 struct list_head *p;
508 truncate_inode_pages(&inode->i_data, 0);
509 invalidate_inode_buffers(inode); /* is it needed here? */
510 clear_inode(inode);
511 spin_lock(&bdev_lock);
512 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
513 __bd_forget(list_entry(p, struct inode, i_devices));
514 }
515 list_del_init(&bdev->bd_list);
516 spin_unlock(&bdev_lock);
517 }
518
519 static const struct super_operations bdev_sops = {
520 .statfs = simple_statfs,
521 .alloc_inode = bdev_alloc_inode,
522 .destroy_inode = bdev_destroy_inode,
523 .drop_inode = generic_delete_inode,
524 .evict_inode = bdev_evict_inode,
525 };
526
527 static struct dentry *bd_mount(struct file_system_type *fs_type,
528 int flags, const char *dev_name, void *data)
529 {
530 return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
531 }
532
533 static struct file_system_type bd_type = {
534 .name = "bdev",
535 .mount = bd_mount,
536 .kill_sb = kill_anon_super,
537 };
538
539 static struct super_block *blockdev_superblock __read_mostly;
540
541 void __init bdev_cache_init(void)
542 {
543 int err;
544 static struct vfsmount *bd_mnt;
545
546 bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
547 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
548 SLAB_MEM_SPREAD|SLAB_PANIC),
549 init_once);
550 err = register_filesystem(&bd_type);
551 if (err)
552 panic("Cannot register bdev pseudo-fs");
553 bd_mnt = kern_mount(&bd_type);
554 if (IS_ERR(bd_mnt))
555 panic("Cannot create bdev pseudo-fs");
556 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
557 }
558
559 /*
560 * Most likely a _very_ bad one - but then it's hardly critical for small
561 * /dev and can be fixed when somebody needs a really large one.
562 * Keep in mind that it will be fed through the icache hash function too.
563 */
564 static inline unsigned long hash(dev_t dev)
565 {
566 return MAJOR(dev)+MINOR(dev);
567 }
568
569 static int bdev_test(struct inode *inode, void *data)
570 {
571 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
572 }
573
574 static int bdev_set(struct inode *inode, void *data)
575 {
576 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
577 return 0;
578 }
579
580 static LIST_HEAD(all_bdevs);
581
582 struct block_device *bdget(dev_t dev)
583 {
584 struct block_device *bdev;
585 struct inode *inode;
586
587 inode = iget5_locked(blockdev_superblock, hash(dev),
588 bdev_test, bdev_set, &dev);
589
590 if (!inode)
591 return NULL;
592
593 bdev = &BDEV_I(inode)->bdev;
594
595 if (inode->i_state & I_NEW) {
596 bdev->bd_contains = NULL;
597 bdev->bd_super = NULL;
598 bdev->bd_inode = inode;
599 bdev->bd_block_size = (1 << inode->i_blkbits);
600 bdev->bd_part_count = 0;
601 bdev->bd_invalidated = 0;
602 inode->i_mode = S_IFBLK;
603 inode->i_rdev = dev;
604 inode->i_bdev = bdev;
605 inode->i_data.a_ops = &def_blk_aops;
606 mapping_set_gfp_mask(&inode->i_data, GFP_USER);
607 inode->i_data.backing_dev_info = &default_backing_dev_info;
608 spin_lock(&bdev_lock);
609 list_add(&bdev->bd_list, &all_bdevs);
610 spin_unlock(&bdev_lock);
611 unlock_new_inode(inode);
612 }
613 return bdev;
614 }
615
616 EXPORT_SYMBOL(bdget);
617
618 /**
619 * bdgrab -- Grab a reference to an already referenced block device
620 * @bdev: Block device to grab a reference to.
621 */
622 struct block_device *bdgrab(struct block_device *bdev)
623 {
624 ihold(bdev->bd_inode);
625 return bdev;
626 }
627
628 long nr_blockdev_pages(void)
629 {
630 struct block_device *bdev;
631 long ret = 0;
632 spin_lock(&bdev_lock);
633 list_for_each_entry(bdev, &all_bdevs, bd_list) {
634 ret += bdev->bd_inode->i_mapping->nrpages;
635 }
636 spin_unlock(&bdev_lock);
637 return ret;
638 }
639
640 void bdput(struct block_device *bdev)
641 {
642 iput(bdev->bd_inode);
643 }
644
645 EXPORT_SYMBOL(bdput);
646
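/*
 * Reference-counting sketch (illustrative): the block_device returned
 * by bdget() is kept alive by its backing inode, so every successful
 * bdget()/bdgrab() must be paired with a bdput():
 *
 *	struct block_device *bdev = bdget(dev);
 *
 *	if (!bdev)
 *		return -ENOMEM;
 *	pr_info("bdev has %lu cached pages\n",
 *		bdev->bd_inode->i_mapping->nrpages);
 *	bdput(bdev);
 */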
647 static struct block_device *bd_acquire(struct inode *inode)
648 {
649 struct block_device *bdev;
650
651 spin_lock(&bdev_lock);
652 bdev = inode->i_bdev;
653 if (bdev) {
654 ihold(bdev->bd_inode);
655 spin_unlock(&bdev_lock);
656 return bdev;
657 }
658 spin_unlock(&bdev_lock);
659
660 bdev = bdget(inode->i_rdev);
661 if (bdev) {
662 spin_lock(&bdev_lock);
663 if (!inode->i_bdev) {
664 /*
665 * We take an additional reference to bd_inode,
666 * and it is released in the inode's clear_inode().
667 * So we can always access it via ->i_mapping
668 * without igrab().
669 */
670 ihold(bdev->bd_inode);
671 inode->i_bdev = bdev;
672 inode->i_mapping = bdev->bd_inode->i_mapping;
673 list_add(&inode->i_devices, &bdev->bd_inodes);
674 }
675 spin_unlock(&bdev_lock);
676 }
677 return bdev;
678 }
679
680 static inline int sb_is_blkdev_sb(struct super_block *sb)
681 {
682 return sb == blockdev_superblock;
683 }
684
685 /* Call when you free an inode. */
686
687 void bd_forget(struct inode *inode)
688 {
689 struct block_device *bdev = NULL;
690
691 spin_lock(&bdev_lock);
692 if (inode->i_bdev) {
693 if (!sb_is_blkdev_sb(inode->i_sb))
694 bdev = inode->i_bdev;
695 __bd_forget(inode);
696 }
697 spin_unlock(&bdev_lock);
698
699 if (bdev)
700 iput(bdev->bd_inode);
701 }
702
703 /**
704 * bd_may_claim - test whether a block device can be claimed
705 * @bdev: block device of interest
706 * @whole: whole block device containing @bdev, may equal @bdev
707 * @holder: holder trying to claim @bdev
708 *
709 * Test whether @bdev can be claimed by @holder.
710 *
711 * CONTEXT:
712 * spin_lock(&bdev_lock).
713 *
714 * RETURNS:
715 * %true if @bdev can be claimed, %false otherwise.
716 */
717 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
718 void *holder)
719 {
720 if (bdev->bd_holder == holder)
721 return true; /* already a holder */
722 else if (bdev->bd_holder != NULL)
723 return false; /* held by someone else */
724 else if (bdev->bd_contains == bdev)
725 return true; /* is a whole device which isn't held */
726
727 else if (whole->bd_holder == bd_may_claim)
728 return true; /* is a partition of a device that is being partitioned */
729 else if (whole->bd_holder != NULL)
730 return false; /* is a partition of a held device */
731 else
732 return true; /* is a partition of an un-held device */
733 }
734
735 /**
736 * bd_prepare_to_claim - prepare to claim a block device
737 * @bdev: block device of interest
738 * @whole: the whole device containing @bdev, may equal @bdev
739 * @holder: holder trying to claim @bdev
740 *
741 * Prepare to claim @bdev. This function fails if @bdev is already
742 * claimed by another holder and waits if another claiming is in
743 * progress. This function doesn't actually claim. On successful
744 * return, the caller has ownership of bd_claiming and bd_holder[s].
745 *
746 * CONTEXT:
747 * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab
748 * it multiple times.
749 *
750 * RETURNS:
751 * 0 if @bdev can be claimed, -EBUSY otherwise.
752 */
753 static int bd_prepare_to_claim(struct block_device *bdev,
754 struct block_device *whole, void *holder)
755 {
756 retry:
757 /* if someone else claimed, fail */
758 if (!bd_may_claim(bdev, whole, holder))
759 return -EBUSY;
760
761 /* if claiming is already in progress, wait for it to finish */
762 if (whole->bd_claiming) {
763 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
764 DEFINE_WAIT(wait);
765
766 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
767 spin_unlock(&bdev_lock);
768 schedule();
769 finish_wait(wq, &wait);
770 spin_lock(&bdev_lock);
771 goto retry;
772 }
773
774 /* yay, all mine */
775 return 0;
776 }
777
778 /**
779 * bd_start_claiming - start claiming a block device
780 * @bdev: block device of interest
781 * @holder: holder trying to claim @bdev
782 *
783 * @bdev is about to be opened exclusively. Check whether @bdev can be
784 * opened exclusively and mark that an exclusive open is in progress. Each
785 * successful call to this function must be matched with a call to
786 * either bd_finish_claiming() or bd_abort_claiming() (which do not
787 * fail).
788 *
789 * This function is used to gain exclusive access to the block device
790 * without actually causing other exclusive open attempts to fail. It
791 * should be used when the open sequence itself requires exclusive
792 * access but may subsequently fail.
793 *
794 * CONTEXT:
795 * Might sleep.
796 *
797 * RETURNS:
798 * Pointer to the block device containing @bdev on success, ERR_PTR()
799 * value on failure.
800 */
801 static struct block_device *bd_start_claiming(struct block_device *bdev,
802 void *holder)
803 {
804 struct gendisk *disk;
805 struct block_device *whole;
806 int partno, err;
807
808 might_sleep();
809
810 /*
811 * @bdev might not have been initialized properly yet, look up
812 * and grab the outer block device the hard way.
813 */
814 disk = get_gendisk(bdev->bd_dev, &partno);
815 if (!disk)
816 return ERR_PTR(-ENXIO);
817
818 /*
819 * Normally, @bdev should equal what's returned from bdget_disk()
820 * if partno is 0; however, some drivers (floppy) use multiple
821 * bdev's for the same physical device and @bdev may be one of the
822 * aliases. Keep @bdev if partno is 0. This means claimer
823 * tracking is broken for those devices but it has always been that
824 * way.
825 */
826 if (partno)
827 whole = bdget_disk(disk, 0);
828 else
829 whole = bdgrab(bdev);
830
831 module_put(disk->fops->owner);
832 put_disk(disk);
833 if (!whole)
834 return ERR_PTR(-ENOMEM);
835
836 /* prepare to claim, if successful, mark claiming in progress */
837 spin_lock(&bdev_lock);
838
839 err = bd_prepare_to_claim(bdev, whole, holder);
840 if (err == 0) {
841 whole->bd_claiming = holder;
842 spin_unlock(&bdev_lock);
843 return whole;
844 } else {
845 spin_unlock(&bdev_lock);
846 bdput(whole);
847 return ERR_PTR(err);
848 }
849 }
850
851 #ifdef CONFIG_SYSFS
852 struct bd_holder_disk {
853 struct list_head list;
854 struct gendisk *disk;
855 int refcnt;
856 };
857
858 static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
859 struct gendisk *disk)
860 {
861 struct bd_holder_disk *holder;
862
863 list_for_each_entry(holder, &bdev->bd_holder_disks, list)
864 if (holder->disk == disk)
865 return holder;
866 return NULL;
867 }
868
869 static int add_symlink(struct kobject *from, struct kobject *to)
870 {
871 return sysfs_create_link(from, to, kobject_name(to));
872 }
873
874 static void del_symlink(struct kobject *from, struct kobject *to)
875 {
876 sysfs_remove_link(from, kobject_name(to));
877 }
878
879 /**
880 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
881 * @bdev: the claimed slave bdev
882 * @disk: the holding disk
883 *
884 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
885 *
886 * This function creates the following sysfs symlinks.
887 *
888 * - from "slaves" directory of the holder @disk to the claimed @bdev
889 * - from "holders" directory of the @bdev to the holder @disk
890 *
891 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
892 * passed to bd_link_disk_holder(), then:
893 *
894 * /sys/block/dm-0/slaves/sda --> /sys/block/sda
895 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
896 *
897 * The caller must have claimed @bdev before calling this function and
898 * ensure that both @bdev and @disk are valid during the creation and
899 * lifetime of these symlinks.
900 *
901 * CONTEXT:
902 * Might sleep.
903 *
904 * RETURNS:
905 * 0 on success, -errno on failure.
906 */
907 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
908 {
909 struct bd_holder_disk *holder;
910 int ret = 0;
911
912 mutex_lock(&bdev->bd_mutex);
913
914 WARN_ON_ONCE(!bdev->bd_holder);
915
916 /* FIXME: remove the following once add_disk() handles errors */
917 if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
918 goto out_unlock;
919
920 holder = bd_find_holder_disk(bdev, disk);
921 if (holder) {
922 holder->refcnt++;
923 goto out_unlock;
924 }
925
926 holder = kzalloc(sizeof(*holder), GFP_KERNEL);
927 if (!holder) {
928 ret = -ENOMEM;
929 goto out_unlock;
930 }
931
932 INIT_LIST_HEAD(&holder->list);
933 holder->disk = disk;
934 holder->refcnt = 1;
935
936 ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
937 if (ret)
938 goto out_free;
939
940 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
941 if (ret)
942 goto out_del;
943 /*
944 * bdev could be deleted beneath us which would implicitly destroy
945 * the holder directory. Hold on to it.
946 */
947 kobject_get(bdev->bd_part->holder_dir);
948
949 list_add(&holder->list, &bdev->bd_holder_disks);
950 goto out_unlock;
951
952 out_del:
953 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
954 out_free:
955 kfree(holder);
956 out_unlock:
957 mutex_unlock(&bdev->bd_mutex);
958 return ret;
959 }
960 EXPORT_SYMBOL_GPL(bd_link_disk_holder);
961
962 /**
963 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
964 * @bdev: the claimed slave bdev
965 * @disk: the holding disk
966 *
967 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
968 *
969 * CONTEXT:
970 * Might sleep.
971 */
972 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
973 {
974 struct bd_holder_disk *holder;
975
976 mutex_lock(&bdev->bd_mutex);
977
978 holder = bd_find_holder_disk(bdev, disk);
979
980 if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
981 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
982 del_symlink(bdev->bd_part->holder_dir,
983 &disk_to_dev(disk)->kobj);
984 kobject_put(bdev->bd_part->holder_dir);
985 list_del_init(&holder->list);
986 kfree(holder);
987 }
988
989 mutex_unlock(&bdev->bd_mutex);
990 }
991 EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
992 #endif
993
994 /**
995 * flush_disk - invalidates all buffer-cache entries on a disk
996 *
997 * @bdev: struct block device to be flushed
998 * @kill_dirty: flag to guide handling of dirty inodes
999 *
1000 * Invalidates all buffer-cache entries on a disk. It should be called
1001 * when a disk has been changed -- either by a media change or online
1002 * resize.
1003 */
1004 static void flush_disk(struct block_device *bdev, bool kill_dirty)
1005 {
1006 if (__invalidate_device(bdev, kill_dirty)) {
1007 char name[BDEVNAME_SIZE] = "";
1008
1009 if (bdev->bd_disk)
1010 disk_name(bdev->bd_disk, 0, name);
1011 printk(KERN_WARNING "VFS: busy inodes on changed media or "
1012 "resized disk %s\n", name);
1013 }
1014
1015 if (!bdev->bd_disk)
1016 return;
1017 if (disk_part_scan_enabled(bdev->bd_disk))
1018 bdev->bd_invalidated = 1;
1019 }
1020
1021 /**
1022 * check_disk_size_change - checks for disk size change and adjusts bdev size.
1023 * @disk: struct gendisk to check
1024 * @bdev: struct bdev to adjust.
1025 *
1026 * This routine checks whether the bdev size matches the disk size
1027 * and adjusts it if it differs.
1028 */
1029 void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
1030 {
1031 loff_t disk_size, bdev_size;
1032
1033 disk_size = (loff_t)get_capacity(disk) << 9;
1034 bdev_size = i_size_read(bdev->bd_inode);
1035 if (disk_size != bdev_size) {
1036 char name[BDEVNAME_SIZE];
1037
1038 disk_name(disk, 0, name);
1039 printk(KERN_INFO
1040 "%s: detected capacity change from %lld to %lld\n",
1041 name, bdev_size, disk_size);
1042 i_size_write(bdev->bd_inode, disk_size);
1043 flush_disk(bdev, false);
1044 }
1045 }
1046 EXPORT_SYMBOL(check_disk_size_change);
1047
1048 /**
1049 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
1050 * @disk: struct gendisk to be revalidated
1051 *
1052 * This routine is a wrapper for lower-level driver's revalidate_disk
1053 * call-backs. It is used to do common pre- and post-operations needed
1054 * for all revalidate_disk operations.
1055 */
1056 int revalidate_disk(struct gendisk *disk)
1057 {
1058 struct block_device *bdev;
1059 int ret = 0;
1060
1061 if (disk->fops->revalidate_disk)
1062 ret = disk->fops->revalidate_disk(disk);
1063
1064 bdev = bdget_disk(disk, 0);
1065 if (!bdev)
1066 return ret;
1067
1068 mutex_lock(&bdev->bd_mutex);
1069 check_disk_size_change(disk, bdev);
1070 mutex_unlock(&bdev->bd_mutex);
1071 bdput(bdev);
1072 return ret;
1073 }
1074 EXPORT_SYMBOL(revalidate_disk);
1075
1076 /*
1077 * This routine checks whether removable media has been changed,
1078 * and invalidates all buffer-cache-entries in that case. This
1079 * is a relatively slow routine, so we have to try to minimize using
1080 * it. Thus it is called only upon a 'mount' or 'open'. This
1081 * is the best way of combining speed and utility, I think.
1082 * People changing diskettes in the middle of an operation deserve
1083 * to lose :-)
1084 */
1085 int check_disk_change(struct block_device *bdev)
1086 {
1087 struct gendisk *disk = bdev->bd_disk;
1088 const struct block_device_operations *bdops = disk->fops;
1089 unsigned int events;
1090
1091 events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
1092 DISK_EVENT_EJECT_REQUEST);
1093 if (!(events & DISK_EVENT_MEDIA_CHANGE))
1094 return 0;
1095
1096 flush_disk(bdev, true);
1097 if (bdops->revalidate_disk)
1098 bdops->revalidate_disk(bdev->bd_disk);
1099 return 1;
1100 }
1101
1102 EXPORT_SYMBOL(check_disk_change);
1103
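/*
 * Usage sketch (illustrative): removable-media drivers typically call
 * check_disk_change() from their open() method so that stale page
 * cache and buffers are dropped when a new medium is detected.
 * exdrv_open is a hypothetical block_device_operations open method.
 *
 *	static int exdrv_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		check_disk_change(bdev);
 *		return 0;
 *	}
 */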
1104 void bd_set_size(struct block_device *bdev, loff_t size)
1105 {
1106 unsigned bsize = bdev_logical_block_size(bdev);
1107
1108 bdev->bd_inode->i_size = size;
1109 while (bsize < PAGE_CACHE_SIZE) {
1110 if (size & bsize)
1111 break;
1112 bsize <<= 1;
1113 }
1114 bdev->bd_block_size = bsize;
1115 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
1116 }
1117 EXPORT_SYMBOL(bd_set_size);
1118
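/*
 * Worked example (illustrative): the loop in bd_set_size() picks the
 * largest power-of-two block size, up to PAGE_CACHE_SIZE, that evenly
 * divides the device size. For size == 512000 bytes (1000 sectors of
 * 512 B) and a 512-byte logical block size, 512000 == 125 << 12, so
 * bsize doubles 512 -> 1024 -> 2048 -> 4096 and stops there because
 * bit 12 is the lowest set bit of the size; bd_block_size becomes 4096.
 */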
1119 static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
1120
1121 /*
1122 * bd_mutex locking:
1123 *
1124 * mutex_lock(part->bd_mutex)
1125 * mutex_lock_nested(whole->bd_mutex, 1)
1126 */
1127
1128 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1129 {
1130 struct gendisk *disk;
1131 struct module *owner;
1132 int ret;
1133 int partno;
1134 int perm = 0;
1135
1136 if (mode & FMODE_READ)
1137 perm |= MAY_READ;
1138 if (mode & FMODE_WRITE)
1139 perm |= MAY_WRITE;
1140 /*
1141 * hooks: /n/, see "layering violations".
1142 */
1143 if (!for_part) {
1144 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1145 if (ret != 0) {
1146 bdput(bdev);
1147 return ret;
1148 }
1149 }
1150
1151 restart:
1152
1153 ret = -ENXIO;
1154 disk = get_gendisk(bdev->bd_dev, &partno);
1155 if (!disk)
1156 goto out;
1157 owner = disk->fops->owner;
1158
1159 disk_block_events(disk);
1160 mutex_lock_nested(&bdev->bd_mutex, for_part);
1161 if (!bdev->bd_openers) {
1162 bdev->bd_disk = disk;
1163 bdev->bd_queue = disk->queue;
1164 bdev->bd_contains = bdev;
1165 if (!partno) {
1166 struct backing_dev_info *bdi;
1167
1168 ret = -ENXIO;
1169 bdev->bd_part = disk_get_part(disk, partno);
1170 if (!bdev->bd_part)
1171 goto out_clear;
1172
1173 ret = 0;
1174 if (disk->fops->open) {
1175 ret = disk->fops->open(bdev, mode);
1176 if (ret == -ERESTARTSYS) {
1177 /* Lost a race with 'disk' being
1178 * deleted, try again.
1179 * See md.c
1180 */
1181 disk_put_part(bdev->bd_part);
1182 bdev->bd_part = NULL;
1183 bdev->bd_disk = NULL;
1184 bdev->bd_queue = NULL;
1185 mutex_unlock(&bdev->bd_mutex);
1186 disk_unblock_events(disk);
1187 put_disk(disk);
1188 module_put(owner);
1189 goto restart;
1190 }
1191 }
1192
1193 if (!ret && !bdev->bd_openers) {
1194 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1195 bdi = blk_get_backing_dev_info(bdev);
1196 if (bdi == NULL)
1197 bdi = &default_backing_dev_info;
1198 bdev_inode_switch_bdi(bdev->bd_inode, bdi);
1199 }
1200
1201 /*
1202 * If the device is invalidated, rescan the partition table
1203 * if open succeeded or failed with -ENOMEDIUM.
1204 * The latter is necessary to prevent ghost
1205 * partitions on a removed medium.
1206 */
1207 if (bdev->bd_invalidated) {
1208 if (!ret)
1209 rescan_partitions(disk, bdev);
1210 else if (ret == -ENOMEDIUM)
1211 invalidate_partitions(disk, bdev);
1212 }
1213 if (ret)
1214 goto out_clear;
1215 } else {
1216 struct block_device *whole;
1217 whole = bdget_disk(disk, 0);
1218 ret = -ENOMEM;
1219 if (!whole)
1220 goto out_clear;
1221 BUG_ON(for_part);
1222 ret = __blkdev_get(whole, mode, 1);
1223 if (ret)
1224 goto out_clear;
1225 bdev->bd_contains = whole;
1226 bdev_inode_switch_bdi(bdev->bd_inode,
1227 whole->bd_inode->i_data.backing_dev_info);
1228 bdev->bd_part = disk_get_part(disk, partno);
1229 if (!(disk->flags & GENHD_FL_UP) ||
1230 !bdev->bd_part || !bdev->bd_part->nr_sects) {
1231 ret = -ENXIO;
1232 goto out_clear;
1233 }
1234 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
1235 }
1236 } else {
1237 if (bdev->bd_contains == bdev) {
1238 ret = 0;
1239 if (bdev->bd_disk->fops->open)
1240 ret = bdev->bd_disk->fops->open(bdev, mode);
1241 /* the same as first opener case, read comment there */
1242 if (bdev->bd_invalidated) {
1243 if (!ret)
1244 rescan_partitions(bdev->bd_disk, bdev);
1245 else if (ret == -ENOMEDIUM)
1246 invalidate_partitions(bdev->bd_disk, bdev);
1247 }
1248 if (ret)
1249 goto out_unlock_bdev;
1250 }
1251 /* only one opener holds refs to the module and disk */
1252 put_disk(disk);
1253 module_put(owner);
1254 }
1255 bdev->bd_openers++;
1256 if (for_part)
1257 bdev->bd_part_count++;
1258 mutex_unlock(&bdev->bd_mutex);
1259 disk_unblock_events(disk);
1260 return 0;
1261
1262 out_clear:
1263 disk_put_part(bdev->bd_part);
1264 bdev->bd_disk = NULL;
1265 bdev->bd_part = NULL;
1266 bdev->bd_queue = NULL;
1267 bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info);
1268 if (bdev != bdev->bd_contains)
1269 __blkdev_put(bdev->bd_contains, mode, 1);
1270 bdev->bd_contains = NULL;
1271 out_unlock_bdev:
1272 mutex_unlock(&bdev->bd_mutex);
1273 disk_unblock_events(disk);
1274 put_disk(disk);
1275 module_put(owner);
1276 out:
1277 bdput(bdev);
1278
1279 return ret;
1280 }
1281
1282 /**
1283 * blkdev_get - open a block device
1284 * @bdev: block_device to open
1285 * @mode: FMODE_* mask
1286 * @holder: exclusive holder identifier
1287 *
1288 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1289 * opened with exclusive access. Specifying %FMODE_EXCL with %NULL
1290 * @holder is invalid. Exclusive opens may nest for the same @holder.
1291 *
1292 * On success, the reference count of @bdev is unchanged. On failure,
1293 * @bdev is put.
1294 *
1295 * CONTEXT:
1296 * Might sleep.
1297 *
1298 * RETURNS:
1299 * 0 on success, -errno on failure.
1300 */
1301 int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1302 {
1303 struct block_device *whole = NULL;
1304 int res;
1305
1306 WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
1307
1308 if ((mode & FMODE_EXCL) && holder) {
1309 whole = bd_start_claiming(bdev, holder);
1310 if (IS_ERR(whole)) {
1311 bdput(bdev);
1312 return PTR_ERR(whole);
1313 }
1314 }
1315
1316 res = __blkdev_get(bdev, mode, 0);
1317
1318 if (whole) {
1319 struct gendisk *disk = whole->bd_disk;
1320
1321 /* finish claiming */
1322 mutex_lock(&bdev->bd_mutex);
1323 spin_lock(&bdev_lock);
1324
1325 if (!res) {
1326 BUG_ON(!bd_may_claim(bdev, whole, holder));
1327 /*
1328 * Note that for a whole device bd_holders
1329 * will be incremented twice, and bd_holder
1330 * will be set to bd_may_claim before being
1331 * set to holder
1332 */
1333 whole->bd_holders++;
1334 whole->bd_holder = bd_may_claim;
1335 bdev->bd_holders++;
1336 bdev->bd_holder = holder;
1337 }
1338
1339 /* tell others that we're done */
1340 BUG_ON(whole->bd_claiming != holder);
1341 whole->bd_claiming = NULL;
1342 wake_up_bit(&whole->bd_claiming, 0);
1343
1344 spin_unlock(&bdev_lock);
1345
1346 /*
1347 * Block event polling for write claims if requested. Any
1348 * write holder makes the write_holder state stick until
1349 * all are released. This is good enough and tracking
1350 * individual writeable references is too fragile given the
1351 * way @mode is used in blkdev_get/put().
1352 */
1353 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1354 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
1355 bdev->bd_write_holder = true;
1356 disk_block_events(disk);
1357 }
1358
1359 mutex_unlock(&bdev->bd_mutex);
1360 bdput(whole);
1361 }
1362
1363 return res;
1364 }
1365 EXPORT_SYMBOL(blkdev_get);
1366
1367 /**
1368 * blkdev_get_by_path - open a block device by name
1369 * @path: path to the block device to open
1370 * @mode: FMODE_* mask
1371 * @holder: exclusive holder identifier
1372 *
1373 * Open the blockdevice described by the device file at @path. @mode
1374 * and @holder are identical to blkdev_get().
1375 *
1376 * On success, the returned block_device has reference count of one.
1377 *
1378 * CONTEXT:
1379 * Might sleep.
1380 *
1381 * RETURNS:
1382 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1383 */
1384 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1385 void *holder)
1386 {
1387 struct block_device *bdev;
1388 int err;
1389
1390 bdev = lookup_bdev(path);
1391 if (IS_ERR(bdev))
1392 return bdev;
1393
1394 err = blkdev_get(bdev, mode, holder);
1395 if (err)
1396 return ERR_PTR(err);
1397
1398 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1399 blkdev_put(bdev, mode);
1400 return ERR_PTR(-EACCES);
1401 }
1402
1403 return bdev;
1404 }
1405 EXPORT_SYMBOL(blkdev_get_by_path);
1406
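/*
 * Usage sketch (illustrative): an exclusive open by path, as a
 * stacking driver might do. The path and my_driver_cookie are
 * hypothetical; the cookie must be a unique, stable pointer (commonly
 * the driver's private context), and the same FMODE_EXCL mode must be
 * passed to blkdev_put() so the claim is released.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/sdb1",
 *			FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *			my_driver_cookie);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	... do I/O ...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */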
1407 /**
1408 * blkdev_get_by_dev - open a block device by device number
1409 * @dev: device number of block device to open
1410 * @mode: FMODE_* mask
1411 * @holder: exclusive holder identifier
1412 *
1413 * Open the blockdevice described by device number @dev. @mode and
1414 * @holder are identical to blkdev_get().
1415 *
1416 * Use it ONLY if you really do not have anything better - i.e. when
1417 * you are behind a truly sucky interface and all you are given is a
1418 * device number. _Never_ to be used for internal purposes. If you
1419 * ever need it - reconsider your API.
1420 *
1421 * On success, the returned block_device has reference count of one.
1422 *
1423 * CONTEXT:
1424 * Might sleep.
1425 *
1426 * RETURNS:
1427 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1428 */
1429 struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
1430 {
1431 struct block_device *bdev;
1432 int err;
1433
1434 bdev = bdget(dev);
1435 if (!bdev)
1436 return ERR_PTR(-ENOMEM);
1437
1438 err = blkdev_get(bdev, mode, holder);
1439 if (err)
1440 return ERR_PTR(err);
1441
1442 return bdev;
1443 }
1444 EXPORT_SYMBOL(blkdev_get_by_dev);
1445
1446 static int blkdev_open(struct inode * inode, struct file * filp)
1447 {
1448 struct block_device *bdev;
1449
1450 /*
1451 * Preserve backwards compatibility and allow large file access
1452 * even if userspace doesn't ask for it explicitly. Some mkfs
1453 * binary needs it. We might want to drop this workaround
1454 * during an unstable branch.
1455 */
1456 filp->f_flags |= O_LARGEFILE;
1457
1458 if (filp->f_flags & O_NDELAY)
1459 filp->f_mode |= FMODE_NDELAY;
1460 if (filp->f_flags & O_EXCL)
1461 filp->f_mode |= FMODE_EXCL;
1462 if ((filp->f_flags & O_ACCMODE) == 3)
1463 filp->f_mode |= FMODE_WRITE_IOCTL;
1464
1465 bdev = bd_acquire(inode);
1466 if (bdev == NULL)
1467 return -ENOMEM;
1468
1469 filp->f_mapping = bdev->bd_inode->i_mapping;
1470
1471 return blkdev_get(bdev, filp->f_mode, filp);
1472 }
1473
1474 static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1475 {
1476 int ret = 0;
1477 struct gendisk *disk = bdev->bd_disk;
1478 struct block_device *victim = NULL;
1479
1480 mutex_lock_nested(&bdev->bd_mutex, for_part);
1481 if (for_part)
1482 bdev->bd_part_count--;
1483
1484 if (!--bdev->bd_openers) {
1485 WARN_ON_ONCE(bdev->bd_holders);
1486 sync_blockdev(bdev);
1487 kill_bdev(bdev);
1488 /* ->release can cause the old bdi to disappear,
1489 * so must switch it out first
1490 */
1491 bdev_inode_switch_bdi(bdev->bd_inode,
1492 &default_backing_dev_info);
1493 }
1494 if (bdev->bd_contains == bdev) {
1495 if (disk->fops->release)
1496 ret = disk->fops->release(disk, mode);
1497 }
1498 if (!bdev->bd_openers) {
1499 struct module *owner = disk->fops->owner;
1500
1501 disk_put_part(bdev->bd_part);
1502 bdev->bd_part = NULL;
1503 bdev->bd_disk = NULL;
1504 if (bdev != bdev->bd_contains)
1505 victim = bdev->bd_contains;
1506 bdev->bd_contains = NULL;
1507
1508 put_disk(disk);
1509 module_put(owner);
1510 }
1511 mutex_unlock(&bdev->bd_mutex);
1512 bdput(bdev);
1513 if (victim)
1514 __blkdev_put(victim, mode, 1);
1515 return ret;
1516 }
1517
1518 int blkdev_put(struct block_device *bdev, fmode_t mode)
1519 {
1520 mutex_lock(&bdev->bd_mutex);
1521
1522 if (mode & FMODE_EXCL) {
1523 bool bdev_free;
1524
1525 /*
1526 * Release a claim on the device. The holder fields
1527 * are protected with bdev_lock. bd_mutex is to
1528 * synchronize disk_holder unlinking.
1529 */
1530 spin_lock(&bdev_lock);
1531
1532 WARN_ON_ONCE(--bdev->bd_holders < 0);
1533 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
1534
1535 /* bd_contains might point to self, check in a separate step */
1536 if ((bdev_free = !bdev->bd_holders))
1537 bdev->bd_holder = NULL;
1538 if (!bdev->bd_contains->bd_holders)
1539 bdev->bd_contains->bd_holder = NULL;
1540
1541 spin_unlock(&bdev_lock);
1542
1543 /*
1544 * If this was the last claim, remove holder link and
1545 * unblock event polling if it was a write holder.
1546 */
1547 if (bdev_free && bdev->bd_write_holder) {
1548 disk_unblock_events(bdev->bd_disk);
1549 bdev->bd_write_holder = false;
1550 }
1551 }
1552
1553 /*
1554 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
1555 * event. This is to ensure detection of media removal commanded
1556 * from userland - e.g. eject(1).
1557 */
1558 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
1559
1560 mutex_unlock(&bdev->bd_mutex);
1561
1562 return __blkdev_put(bdev, mode, 0);
1563 }
1564 EXPORT_SYMBOL(blkdev_put);
1565
1566 static int blkdev_close(struct inode * inode, struct file * filp)
1567 {
1568 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
1569
1570 return blkdev_put(bdev, filp->f_mode);
1571 }
1572
1573 static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1574 {
1575 struct block_device *bdev = I_BDEV(file->f_mapping->host);
1576 fmode_t mode = file->f_mode;
1577
1578 /*
1579 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
1580 * to update it before every ioctl.
1581 */
1582 if (file->f_flags & O_NDELAY)
1583 mode |= FMODE_NDELAY;
1584 else
1585 mode &= ~FMODE_NDELAY;
1586
1587 return blkdev_ioctl(bdev, mode, cmd, arg);
1588 }
1589
1590 ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
1591 unsigned long nr_segs, loff_t pos)
1592 {
1593 ssize_t ret;
1594 struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
1595
1596 down_read(&bdev->bd_block_size_semaphore);
1597
1598 ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
1599
1600 up_read(&bdev->bd_block_size_semaphore);
1601
1602 return ret;
1603 }
1604 EXPORT_SYMBOL_GPL(blkdev_aio_read);
1605
1606 /*
1607 * Write data to the block device. Only intended for the block device itself
1608 * and the raw driver which basically is a fake block device.
1609 *
1610 * Does not take i_mutex for the write and thus is not for general purpose
1611 * use.
1612 */
1613 ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
1614 unsigned long nr_segs, loff_t pos)
1615 {
1616 struct file *file = iocb->ki_filp;
1617 struct block_device *bdev = I_BDEV(file->f_mapping->host);
1618 struct blk_plug plug;
1619 ssize_t ret;
1620
1621 BUG_ON(iocb->ki_pos != pos);
1622
1623 blk_start_plug(&plug);
1624
1625 down_read(&bdev->bd_block_size_semaphore);
1626
1627 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
1628 if (ret > 0 || ret == -EIOCBQUEUED) {
1629 ssize_t err;
1630
1631 err = generic_write_sync(file, pos, ret);
1632 if (err < 0 && ret > 0)
1633 ret = err;
1634 }
1635
1636 up_read(&bdev->bd_block_size_semaphore);
1637
1638 blk_finish_plug(&plug);
1639
1640 return ret;
1641 }
1642 EXPORT_SYMBOL_GPL(blkdev_aio_write);
1643
1644 int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
1645 {
1646 int ret;
1647 struct block_device *bdev = I_BDEV(file->f_mapping->host);
1648
1649 down_read(&bdev->bd_block_size_semaphore);
1650
1651 ret = generic_file_mmap(file, vma);
1652
1653 up_read(&bdev->bd_block_size_semaphore);
1654
1655 return ret;
1656 }
1657
1658 /*
1659 * Try to release a page associated with block device when the system
1660 * is under memory pressure.
1661 */
1662 static int blkdev_releasepage(struct page *page, gfp_t wait)
1663 {
1664 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1665
1666 if (super && super->s_op->bdev_try_to_free_page)
1667 return super->s_op->bdev_try_to_free_page(super, page, wait);
1668
1669 return try_to_free_buffers(page);
1670 }
1671
1672 static const struct address_space_operations def_blk_aops = {
1673 .readpage = blkdev_readpage,
1674 .writepage = blkdev_writepage,
1675 .write_begin = blkdev_write_begin,
1676 .write_end = blkdev_write_end,
1677 .writepages = generic_writepages,
1678 .releasepage = blkdev_releasepage,
1679 .direct_IO = blkdev_direct_IO,
1680 };
1681
1682 const struct file_operations def_blk_fops = {
1683 .open = blkdev_open,
1684 .release = blkdev_close,
1685 .llseek = block_llseek,
1686 .read = do_sync_read,
1687 .write = do_sync_write,
1688 .aio_read = blkdev_aio_read,
1689 .aio_write = blkdev_aio_write,
1690 .mmap = blkdev_mmap,
1691 .fsync = blkdev_fsync,
1692 .unlocked_ioctl = block_ioctl,
1693 #ifdef CONFIG_COMPAT
1694 .compat_ioctl = compat_blkdev_ioctl,
1695 #endif
1696 .splice_read = generic_file_splice_read,
1697 .splice_write = generic_file_splice_write,
1698 };
1699
1700 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
1701 {
1702 int res;
1703 mm_segment_t old_fs = get_fs();
1704 set_fs(KERNEL_DS);
1705 res = blkdev_ioctl(bdev, 0, cmd, arg);
1706 set_fs(old_fs);
1707 return res;
1708 }
1709
1710 EXPORT_SYMBOL(ioctl_by_bdev);
1711
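/*
 * Usage sketch (illustrative, in the style of fs/isofs): the
 * set_fs(KERNEL_DS) above is what lets the ioctl path accept a
 * kernel-space argument, so in-kernel callers can do e.g.:
 *
 *	struct cdrom_multisession ms_info;
 *	int res;
 *
 *	ms_info.addr_format = CDROM_LBA;
 *	res = ioctl_by_bdev(bdev, CDROMMULTISESSION,
 *			    (unsigned long)&ms_info);
 */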
1712 /**
1713 * lookup_bdev - lookup a struct block_device by name
1714 * @pathname: special file representing the block device
1715 *
1716 * Get a reference to the blockdevice at @pathname in the current
1717 * namespace if possible and return it. Return ERR_PTR(error)
1718 * otherwise.
1719 */
1720 struct block_device *lookup_bdev(const char *pathname)
1721 {
1722 struct block_device *bdev;
1723 struct inode *inode;
1724 struct path path;
1725 int error;
1726
1727 if (!pathname || !*pathname)
1728 return ERR_PTR(-EINVAL);
1729
1730 error = kern_path(pathname, LOOKUP_FOLLOW, &path);
1731 if (error)
1732 return ERR_PTR(error);
1733
1734 inode = path.dentry->d_inode;
1735 error = -ENOTBLK;
1736 if (!S_ISBLK(inode->i_mode))
1737 goto fail;
1738 error = -EACCES;
1739 if (path.mnt->mnt_flags & MNT_NODEV)
1740 goto fail;
1741 error = -ENOMEM;
1742 bdev = bd_acquire(inode);
1743 if (!bdev)
1744 goto fail;
1745 out:
1746 path_put(&path);
1747 return bdev;
1748 fail:
1749 bdev = ERR_PTR(error);
1750 goto out;
1751 }
1752 EXPORT_SYMBOL(lookup_bdev);
1753
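/*
 * Usage sketch (illustrative): lookup_bdev() resolves a device node
 * path to its block_device and takes a reference (via bd_acquire()),
 * so the result must be dropped with bdput(). The path is hypothetical.
 *
 *	struct block_device *bdev = lookup_bdev("/dev/loop0");
 *
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	pr_info("resolved to dev_t %u:%u\n",
 *		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 *	bdput(bdev);
 */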
1754 int __invalidate_device(struct block_device *bdev, bool kill_dirty)
1755 {
1756 struct super_block *sb = get_super(bdev);
1757 int res = 0;
1758
1759 if (sb) {
1760 /*
1761 * no need to lock the super, get_super holds the
1762 * read mutex so the filesystem cannot go away
1763 * under us (->put_super runs with the write lock
1764 * held).
1765 */
1766 shrink_dcache_sb(sb);
1767 res = invalidate_inodes(sb, kill_dirty);
1768 drop_super(sb);
1769 }
1770 invalidate_bdev(bdev);
1771 return res;
1772 }
1773 EXPORT_SYMBOL(__invalidate_device);
1774
1775 void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
1776 {
1777 struct inode *inode, *old_inode = NULL;
1778
1779 spin_lock(&inode_sb_list_lock);
1780 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
1781 struct address_space *mapping = inode->i_mapping;
1782
1783 spin_lock(&inode->i_lock);
1784 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
1785 mapping->nrpages == 0) {
1786 spin_unlock(&inode->i_lock);
1787 continue;
1788 }
1789 __iget(inode);
1790 spin_unlock(&inode->i_lock);
1791 spin_unlock(&inode_sb_list_lock);
1792 /*
1793 * We hold a reference to 'inode' so it couldn't have been
1794 * removed from s_inodes list while we dropped the
1795 * inode_sb_list_lock. We cannot iput the inode now as we can
1796 * be holding the last reference and we cannot iput it under
1797 * inode_sb_list_lock. So we keep the reference and iput it
1798 * later.
1799 */
1800 iput(old_inode);
1801 old_inode = inode;
1802
1803 func(I_BDEV(inode), arg);
1804
1805 spin_lock(&inode_sb_list_lock);
1806 }
1807 spin_unlock(&inode_sb_list_lock);
1808 iput(old_inode);
1809 }
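/*
 * Usage sketch (illustrative, in the style of fs/sync.c): callers hand
 * iterate_bdevs() a callback that is invoked for every block-device
 * inode with cached pages, e.g. to kick off writeback on all of them:
 *
 *	static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
 *	{
 *		filemap_fdatawrite(bdev->bd_inode->i_mapping);
 *	}
 *
 *	iterate_bdevs(fdatawrite_one_bdev, NULL);
 */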