// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"
#include "../block/blk.h"
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
		if (err)
			return err;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;
}
EXPORT_SYMBOL(truncate_bdev_range);
static void set_init_blocksize(struct block_device *bdev)
{
	bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
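/*
 * Illustrative sketch (not part of this file): a filesystem's fill_super
 * typically picks its block size with the helpers above. The filesystem
 * name and the 1k floor are hypothetical:
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;	// device sectors larger than PAGE_SIZE
 *		...
 *	}
 *
 * sb_min_blocksize() returns the block size actually set, or 0 on failure.
 */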
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}

#define DIO_INLINE_BIO_VECS 4
static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> 9;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	return ret;
}
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	bool			multi_bio : 1;
	bool			should_dirty : 1;
	bool			is_sync : 1;
	struct bio		bio;
};

static struct bio_set blkdev_dio_pool;
static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
static ssize_t
__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	blk_qc_t qc = BLK_QC_T_NONE;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		bio_get(bio);
	} else {
		dio->iocb = iocb;
	}

	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);

	/*
	 * Don't plug for HIPRI/polled IO, as those should go straight
	 * to issue
	 */
	if (!is_poll)
		blk_start_plug(&plug);

	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
		if (!nr_pages) {
			bool polled = false;

			if (iocb->ki_flags & IOCB_HIPRI) {
				bio_set_polled(bio, iocb);
				polled = true;
			}

			qc = submit_bio(bio);

			if (polled)
				WRITE_ONCE(iocb->ki_cookie, qc);
			break;
		}

		if (!dio->multi_bio) {
			/*
			 * AIO needs an extra reference to ensure the dio
			 * structure which is embedded into the first bio
			 * stays around.
			 */
			if (!is_sync)
				bio_get(bio);
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
		} else {
			atomic_inc(&dio->ref);
		}

		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	if (!is_poll)
		blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;

		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}
static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	int nr_pages;

	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
	if (!nr_pages)
		return 0;
	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
}
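/*
 * Worked example (illustrative): a synchronous 16KiB O_DIRECT read on a
 * 4KiB-page machine spans at most 5 pages (4 full pages plus one extra for
 * a misaligned start), so nr_pages <= BIO_MAX_PAGES and the request takes
 * the stack-allocated __blkdev_direct_IO_simple() path. Larger or async
 * requests go through __blkdev_direct_IO() and blkdev_dio_pool instead.
 */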
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
}
module_init(blkdev_init);
int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);
/**
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
 * the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);
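/*
 * Illustrative pairing (sketch, not from this file): a snapshot driver
 * would bracket its work with the two helpers; error handling trimmed and
 * take_snapshot() is a hypothetical callback:
 *
 *	int err = freeze_bdev(bdev);
 *	if (err)
 *		return err;
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev);
 */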
/**
 * thaw_bdev -- unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}
/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}
int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);
/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
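/*
 * Usage sketch (illustrative): callers such as the swap read path treat a
 * non-zero return as "fall back to a normal bio", roughly:
 *
 *	if (bdev_read_page(bdev, sector, page))
 *		submit_bio(bio);	// hypothetical fallback path
 */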
/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}
static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	/* Detach inode from wb early as bdi_put() may free bdi->wb */
	inode_detach_wb(inode);
	if (bdev->bd_bdi != &noop_backing_dev_info) {
		bdi_put(bdev->bd_bdi);
		bdev->bd_bdi = &noop_backing_dev_info;
	}
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};
static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_disk = disk;
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_bdi = &noop_backing_dev_info;
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	return bdev;
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}
static struct block_device *bdget(dev_t dev)
{
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode)
		return NULL;
	return &BDEV_I(inode)->bdev;
}

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 *
 * Returns the block_device with an additional reference when successful,
 * or NULL if the inode is already being freed.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	if (!igrab(bdev->bd_inode))
		return NULL;
	return bdev;
}
EXPORT_SYMBOL(bdgrab);
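/*
 * Illustrative sketch: bdgrab() pairs with bdput(). A caller that already
 * holds a reference can safely hand one to another context:
 *
 *	if (bdgrab(bdev))
 *		hand_off_to_worker(bdev);	// hypothetical; worker must bdput()
 */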
long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}
EXPORT_SYMBOL(bdput);
/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}
/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}
/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}
/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
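/*
 * Claim lifecycle sketch (illustrative): a temporary exclusive claim, as
 * used by truncate_bdev_range() above, follows this pattern; my_holder is
 * any unique cookie and do_exclusive_work() is hypothetical:
 *
 *	err = bd_prepare_to_claim(bdev, my_holder);
 *	if (err)
 *		return err;
 *	do_exclusive_work(bdev);
 *	bd_abort_claiming(bdev, my_holder);	// never became a real open
 *
 * A real exclusive open would instead end with bd_finish_claiming().
 */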
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}
/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, bdev_kobj(bdev));
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
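/*
 * Usage sketch (illustrative): device-mapper calls this when it starts
 * using an underlying device, roughly:
 *
 *	err = bd_link_disk_holder(slave_bdev, dm_disk(md));
 *
 * and undoes it with bd_unlink_disk_holder() when the table is dropped.
 */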
/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, bdev_kobj(bdev));
		del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
int bdev_disk_changed(struct block_device *bdev, bool invalidate)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	lockdep_assert_held(&bdev->bd_mutex);

	clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);

rescan:
	ret = blk_drop_partitions(bdev);
	if (ret)
		return ret;

	/*
	 * Historically we only set the capacity to zero for devices that
	 * support partitions (independent of actually having partitions
	 * created). Doing that is rather inconsistent, but changing it
	 * broke legacy udisks polling for legacy ide-cdrom devices. Use
	 * the crude check below to get the sane behavior for most devices
	 * while not breaking userspace for this particular setup.
	 */
	if (invalidate) {
		if (disk_part_scan_enabled(disk) ||
		    !(disk->flags & GENHD_FL_REMOVABLE))
			set_capacity(disk, 0);
	} else {
		if (disk->fops->revalidate_disk)
			disk->fops->revalidate_disk(disk);
	}

	if (get_capacity(disk)) {
		ret = blk_add_partitions(disk, bdev);
		if (ret == -EAGAIN)
			goto rescan;
	} else if (invalidate) {
		/*
		 * Tell userspace that the media / partition table may have
		 * changed.
		 */
		kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
	}

	return ret;
}
/*
 * Only exported for loop and dasd for historic reasons. Don't use in new
 * code!
 */
EXPORT_SYMBOL_GPL(bdev_disk_changed);
/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */
static int __blkdev_get(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	if (!bdev->bd_openers) {
		if (!bdev_is_partition(bdev)) {
			ret = 0;
			if (disk->fops->open)
				ret = disk->fops->open(bdev, mode);

			if (!ret)
				set_init_blocksize(bdev);

			/*
			 * If the device is invalidated, rescan partition
			 * if open succeeded or failed with -ENOMEDIUM.
			 * The latter is necessary to prevent ghost
			 * partitions on a removed medium.
			 */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);

			if (ret)
				return ret;
		} else {
			struct block_device *whole = bdgrab(disk->part0);

			mutex_lock_nested(&whole->bd_mutex, 1);
			ret = __blkdev_get(whole, mode);
			if (ret) {
				mutex_unlock(&whole->bd_mutex);
				bdput(whole);
				return ret;
			}
			whole->bd_part_count++;
			mutex_unlock(&whole->bd_mutex);

			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev_nr_sectors(bdev)) {
				__blkdev_put(whole, mode, 1);
				bdput(whole);
				return -ENXIO;
			}
			set_init_blocksize(bdev);
		}

		if (bdev->bd_bdi == &noop_backing_dev_info)
			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
	} else {
		if (!bdev_is_partition(bdev)) {
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			/* the same as first opener case, read comment there */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
			if (ret)
				return ret;
		}
	}
	bdev->bd_openers++;
	return 0;
}
*blkdev_get_no_open(dev_t dev
)
1343 struct block_device
*bdev
;
1344 struct gendisk
*disk
;
1346 down_read(&bdev_lookup_sem
);
1349 up_read(&bdev_lookup_sem
);
1350 blk_request_module(dev
);
1351 down_read(&bdev_lookup_sem
);
1358 disk
= bdev
->bd_disk
;
1359 if (!kobject_get_unless_zero(&disk_to_dev(disk
)->kobj
))
1361 if ((disk
->flags
& (GENHD_FL_UP
| GENHD_FL_HIDDEN
)) != GENHD_FL_UP
)
1363 if (!try_module_get(bdev
->bd_disk
->fops
->owner
))
1365 up_read(&bdev_lookup_sem
);
1372 up_read(&bdev_lookup_sem
);
1376 void blkdev_put_no_open(struct block_device
*bdev
)
1378 module_put(bdev
->bd_disk
->fops
->owner
);
1379 put_disk(bdev
->bd_disk
);
/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	/*
	 * If we lost a race with 'disk' being deleted, try again.  See md.c.
	 */
retry:
	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&bdev->bd_mutex);
	ret = __blkdev_get(bdev, mode);
	if (ret)
		goto abort_claiming;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested. Any write
		 * holder makes the write_holder state stick until all are
		 * released. This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&bdev->bd_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
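/*
 * Usage sketch (illustrative): opening a whole device exclusively by
 * number and releasing it again; my_holder is any unique cookie:
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_dev(MKDEV(8, 0), FMODE_READ | FMODE_EXCL,
 *				 my_holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 */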
/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
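/*
 * Usage sketch (illustrative): a filesystem mounting by path would do
 * something like the following, typically with the superblock as holder
 * so nested exclusive opens by the same owner succeed:
 *
 *	bdev = blkdev_get_by_path("/dev/sda1", FMODE_READ | FMODE_WRITE |
 *				  FMODE_EXCL, sb);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */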
static int blkdev_open(struct inode * inode, struct file * filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);
		bdev_write_inode(bdev);
		if (bdev_is_partition(bdev))
			victim = bdev_whole(bdev);
	}

	if (!bdev_is_partition(bdev) && disk->fops->release)
		disk->fops->release(disk, mode);
	mutex_unlock(&bdev->bd_mutex);
	if (victim) {
		__blkdev_put(victim, mode, 1);
		bdput(victim);
	}
}
void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);
	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
static int blkdev_close(struct inode * inode, struct file * filp)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	iov_iter_truncate(from, size - iocb->ki_pos);

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);
ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);
/*
 * Try to release a page associated with block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}
static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = i_size_read(bdev->bd_inode);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		return error;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					    GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					    GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, 0);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (error)
		return error;

	/*
	 * Invalidate again; if someone wandered in and dirtied a page,
	 * the caller will be given -EBUSY.  The third argument is
	 * inclusive, so the rounding here is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     start >> PAGE_SHIFT,
					     end >> PAGE_SHIFT);
}
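/*
 * Userspace view (illustrative): the supported flag combinations map to
 * fallocate(2) on the device node, e.g. punching (discarding) the first
 * megabyte of a disk:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 */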
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= blkdev_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};
/**
 * lookup_bdev - lookup a struct block_device by name
 * @pathname:	special file representing the block device
 * @dev:	return value of the block device's dev_t
 *
 * Look up the block device at @pathname in the current namespace if
 * possible and return its device number in *@dev.  Returns 0 on success
 * or a negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
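/*
 * Usage sketch (illustrative): resolving a path to a device number before
 * opening it:
 *
 *	dev_t dev;
 *
 *	error = lookup_bdev("/dev/vda", &dev);
 *	if (!error)
 *		bdev = blkdev_get_by_dev(dev, mode, holder);
 */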
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);
void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_mutex);
		if (bdev->bd_openers)
			func(bdev, arg);
		mutex_unlock(&bdev->bd_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}