// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					"for block device %s (err=%d).\n",
					bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * out with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
		if (err)
			return err;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;
}
static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}
EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}
EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}
EXPORT_SYMBOL(sb_min_blocksize);
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}
#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> 9;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	bool			multi_bio : 1;
	bool			should_dirty : 1;
	bool			is_sync : 1;
	struct bio		bio;
};

static struct bio_set blkdev_dio_pool;
static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	blk_qc_t qc = BLK_QC_T_NONE;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		bio_get(bio);
	} else {
		dio->iocb = iocb;
	}

	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);

	/*
	 * Don't plug for HIPRI/polled IO, as those should go straight
	 * to issue
	 */
	if (!is_poll)
		blk_start_plug(&plug);

	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_PAGES);
		if (!nr_pages) {
			bool polled = false;

			if (iocb->ki_flags & IOCB_HIPRI) {
				bio_set_polled(bio, iocb);
				polled = true;
			}

			qc = submit_bio(bio);

			if (polled)
				WRITE_ONCE(iocb->ki_cookie, qc);
			break;
		}

		if (!dio->multi_bio) {
			/*
			 * AIO needs an extra reference to ensure the dio
			 * structure which is embedded into the first bio
			 * stays around.
			 */
			if (!is_sync)
				bio_get(bio);
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
		} else {
			atomic_inc(&dio->ref);
		}

		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	if (!is_poll)
		blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;

		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}
static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_PAGES + 1);
	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS);
}
module_init(blkdev_init);
int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);
/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously.  It counts up in freeze_bdev() and
 * counts down in thaw_bdev().  When it becomes 0, thaw_bdev() actually
 * unfreezes the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);
/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}
/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}
int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);
/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	ei->bdev.bd_bdi = &noop_backing_dev_info;
	return &ei->vfs_inode;
}
static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}
static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	/* Detach inode from wb early as bdi_put() may free bdi->wb */
	inode_detach_wb(inode);
	if (bdev->bd_bdi != &noop_backing_dev_info) {
		bdi_put(bdev->bd_bdi);
		bdev->bd_bdi = &noop_backing_dev_info;
	}
}
static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}
static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_mutex);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_disk = disk;
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	return bdev;
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}

static struct block_device *bdget(dev_t dev)
{
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode)
		return NULL;
	return &BDEV_I(inode)->bdev;
}
/**
 * bdgrab - Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 *
 * Returns the block_device with an additional reference when successful,
 * or NULL if the inode is already being freed.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	if (!igrab(bdev->bd_inode))
		return NULL;
	return bdev;
}
EXPORT_SYMBOL(bdgrab);
long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}
EXPORT_SYMBOL(bdput);
/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}
/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}
/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device.  Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}
/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed.  This
 * can also be used when exclusive open is not actually desired and we just
 * needed to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}
*from
, struct kobject
*to
)
1103 return sysfs_create_link(from
, to
, kobject_name(to
));
1106 static void del_symlink(struct kobject
*from
, struct kobject
*to
)
1108 sysfs_remove_link(from
, kobject_name(to
));
/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, bdev_kobj(bdev));
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, bdev_kobj(bdev));
		del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
int bdev_disk_changed(struct block_device *bdev, bool invalidate)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	lockdep_assert_held(&bdev->bd_mutex);

	clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);

rescan:
	ret = blk_drop_partitions(bdev);
	if (ret)
		return ret;

	/*
	 * Historically we only set the capacity to zero for devices that
	 * support partitions (independent of actually having partitions
	 * created).  Doing that is rather inconsistent, but changing it
	 * broke legacy udisks polling for legacy ide-cdrom devices.  Use
	 * the crude check below to get the sane behavior for most devices
	 * while not breaking userspace for this particular setup.
	 */
	if (invalidate) {
		if (disk_part_scan_enabled(disk) ||
		    !(disk->flags & GENHD_FL_REMOVABLE))
			set_capacity(disk, 0);
	} else {
		if (disk->fops->revalidate_disk)
			disk->fops->revalidate_disk(disk);
	}

	if (get_capacity(disk)) {
		ret = blk_add_partitions(disk, bdev);
		if (ret == -EAGAIN)
			goto rescan;
	} else if (invalidate) {
		/*
		 * Tell userspace that the media / partition table may have
		 * changed.
		 */
		kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
	}

	return ret;
}
/*
 * Only exported for loop and dasd for historic reasons. Don't use in new
 * code!
 */
EXPORT_SYMBOL_GPL(bdev_disk_changed);
/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */
static int __blkdev_get(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	if (!bdev->bd_openers) {
		if (!bdev_is_partition(bdev)) {
			ret = 0;
			if (disk->fops->open)
				ret = disk->fops->open(bdev, mode);

			if (!ret)
				set_init_blocksize(bdev);

			/*
			 * If the device is invalidated, rescan partition
			 * if open succeeded or failed with -ENOMEDIUM.
			 * The latter is necessary to prevent ghost
			 * partitions on a removed medium.
			 */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);

			if (ret)
				return ret;
		} else {
			struct block_device *whole = bdgrab(disk->part0);

			mutex_lock_nested(&whole->bd_mutex, 1);
			ret = __blkdev_get(whole, mode);
			if (ret) {
				mutex_unlock(&whole->bd_mutex);
				bdput(whole);
				return ret;
			}
			whole->bd_part_count++;
			mutex_unlock(&whole->bd_mutex);

			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev_nr_sectors(bdev)) {
				__blkdev_put(whole, mode, 1);
				bdput(whole);
				return -ENXIO;
			}
			set_init_blocksize(bdev);
		}

		if (bdev->bd_bdi == &noop_backing_dev_info)
			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
	} else {
		if (!bdev_is_partition(bdev)) {
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			/* the same as first opener case, read comment there */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
			if (ret)
				return ret;
		}
	}
	bdev->bd_openers++;
	return 0;
}
struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct gendisk *disk;

	down_read(&bdev_lookup_sem);
	bdev = bdget(dev);
	if (!bdev) {
		up_read(&bdev_lookup_sem);
		blk_request_module(dev);
		down_read(&bdev_lookup_sem);

		bdev = bdget(dev);
		if (!bdev)
			goto unlock;
	}

	disk = bdev->bd_disk;
	if (!kobject_get_unless_zero(&disk_to_dev(disk)->kobj))
		goto bdput;
	if ((disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
		goto put_disk;
	if (!try_module_get(bdev->bd_disk->fops->owner))
		goto put_disk;
	up_read(&bdev_lookup_sem);
	return bdev;

put_disk:
	put_disk(disk);
bdput:
	bdput(bdev);
unlock:
	up_read(&bdev_lookup_sem);
	return NULL;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	module_put(bdev->bd_disk->fops->owner);
	put_disk(bdev->bd_disk);
	bdput(bdev);
}
/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev.  If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	/*
	 * If we lost a race with 'disk' being deleted, try again.  See md.c.
	 */
retry:
	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&bdev->bd_mutex);
	ret = __blkdev_get(bdev, mode);
	if (ret)
		goto abort_claiming;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&bdev->bd_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens
 * may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);
		bdev_write_inode(bdev);
		if (bdev_is_partition(bdev))
			victim = bdev_whole(bdev);
	}

	if (!bdev_is_partition(bdev) && disk->fops->release)
		disk->fops->release(disk, mode);
	mutex_unlock(&bdev->bd_mutex);
	if (victim) {
		__blkdev_put(victim, mode, 1);
		bdput(victim);
	}
}
void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock evpoll if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);
	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));

	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	iov_iter_truncate(from, size - iocb->ki_pos);

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);
ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);
/*
 * Try to release a page associated with block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}
static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = i_size_read(bdev->bd_inode);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		return error;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, 0);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (error)
		return error;

	/*
	 * Invalidate the page cache again; if someone wandered in and dirtied
	 * a page, we just discard it - userspace has no way of knowing whether
	 * the write happened before or after discard completing...
	 */
	return truncate_bdev_range(bdev, file->f_mode, start, end);
}
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= blkdev_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};
/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: The special file representing the block device.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);
void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_mutex);
		if (bdev->bd_openers)
			func(bdev, arg);
		mutex_unlock(&bdev->bd_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}