// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"
#include "../block/blk.h"
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}
struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);
static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/*
	 * 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
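/*
 * Usage sketch (illustrative only, not part of this file): a caller that did
 * not open the device with FMODE_EXCL can still drop a cached range, because
 * truncate_bdev_range() upgrades to a temporary claim internally.  The helper
 * name and the 1 MiB range are hypothetical.
 */
static int __maybe_unused example_drop_first_mib(struct block_device *bdev)
{
	/* mode 0: we hold no exclusive claim; 'lend' is inclusive */
	return truncate_bdev_range(bdev, 0, 0, (1024 * 1024) - 1);
}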
static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}
int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current size */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}
EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);
int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
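/*
 * Usage sketch (illustrative only): a filesystem's fill_super typically asks
 * for its preferred block size but never less than the device's logical block
 * size; a zero return means set_blocksize() rejected the result.  The
 * function name and the 1024-byte preference are hypothetical.
 */
static int __maybe_unused examplefs_setup_blocksize(struct super_block *sb)
{
	if (!sb_min_blocksize(sb, 1024))
		return -EINVAL;
	return 0;
}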
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}
static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}
#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> 9;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	bool			multi_bio : 1;
	bool			should_dirty : 1;
	bool			is_sync : 1;
	struct bio		bio;
};

static struct bio_set blkdev_dio_pool;
static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	blk_qc_t qc = BLK_QC_T_NONE;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		bio_get(bio);
	} else {
		dio->iocb = iocb;
	}

	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);

	/*
	 * Don't plug for HIPRI/polled IO, as those should go straight
	 * to issue
	 */
	if (!is_poll)
		blk_start_plug(&plug);

	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			bool polled = false;

			if (iocb->ki_flags & IOCB_HIPRI) {
				bio_set_polled(bio, iocb);
				polled = true;
			}

			qc = submit_bio(bio);

			if (polled)
				WRITE_ONCE(iocb->ki_cookie, qc);
			break;
		}

		if (!dio->multi_bio) {
			/*
			 * AIO needs an extra reference to ensure the dio
			 * structure which is embedded into the first bio
			 * stays around.
			 */
			bio_get(bio);
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
		} else {
			atomic_inc(&dio->ref);
		}

		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	if (!is_poll)
		blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;

		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}
static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS);
}
module_init(blkdev_init);
int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);
/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);
/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
 * the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);
/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
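/*
 * Usage sketch (illustrative only): snapshot-style users bracket their work
 * with freeze_bdev()/thaw_bdev(); bd_fsfreeze_count makes the pairing safe
 * even when several users freeze the same device concurrently.  The callback
 * parameter and the helper name are hypothetical.
 */
static int __maybe_unused example_with_frozen_bdev(struct block_device *bdev,
					int (*work)(struct block_device *))
{
	int error = freeze_bdev(bdev);

	if (error)
		return error;
	error = work(bdev);
	thaw_bdev(bdev);
	return error;
}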
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}
/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}
int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);
/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
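/*
 * Usage sketch (illustrative only): per the comment above, errors from
 * bdev_read_page() are "soft", so a caller keeps the page locked and submits
 * a regular bio instead of failing the read.  The helper name is
 * hypothetical.
 */
static int __maybe_unused example_try_rw_page_read(struct block_device *bdev,
					sector_t sector, struct page *page)
{
	int err = bdev_read_page(bdev, sector, page);

	if (err)
		return err;	/* caller would fall back to a normal bio here */
	return 0;		/* submitted via ->rw_page; page unlocks on completion */
}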
/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	ei->bdev.bd_bdi = &noop_backing_dev_info;
	return &ei->vfs_inode;
}
static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}
static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}
static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;

	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	/* Detach inode from wb early as bdi_put() may free bdi->wb */
	inode_detach_wb(inode);
	if (bdev->bd_bdi != &noop_backing_dev_info) {
		bdi_put(bdev->bd_bdi);
		bdev->bd_bdi = &noop_backing_dev_info;
	}
}
static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};
static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}
static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);
void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_disk = disk;
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	return bdev;
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}
static struct block_device *bdget(dev_t dev)
{
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode)
		return NULL;
	return &BDEV_I(inode)->bdev;
}
/**
 * bdgrab - Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 *
 * Returns the block_device with an additional reference when successful,
 * or NULL if the inode is already being freed.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	if (!igrab(bdev->bd_inode))
		return NULL;
	return bdev;
}
EXPORT_SYMBOL(bdgrab);
long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}
void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}
EXPORT_SYMBOL(bdput);
/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}
/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}
/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device.  Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}
/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
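/*
 * Usage sketch (illustrative only): blocking other exclusive openers for a
 * while without actually opening the device, the pattern bd_abort_claiming()
 * is documented for above.  'example_token' is a hypothetical holder cookie;
 * any unique pointer works.
 */
static int __maybe_unused example_block_exclusive_openers(struct block_device *bdev)
{
	static char example_token;
	int err = bd_prepare_to_claim(bdev, &example_token);

	if (err)
		return err;	/* -EBUSY: someone else holds the device */
	/* ... work that must not race with exclusive opens ... */
	bd_abort_claiming(bdev, &example_token);
	return 0;
}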
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
	struct list_head	list;
	struct gendisk		*disk;
	int			refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
						  struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
		if (holder->disk == disk)
			return holder;
	return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
	return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
	sysfs_remove_link(from, kobject_name(to));
}
/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_disk->open_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, bdev_kobj(bdev));
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory.  Hold on to it.
	 */
	kobject_get(bdev->bd_holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, bdev_kobj(bdev));
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_disk->open_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_disk->open_mutex);
	holder = bd_find_holder_disk(bdev, disk);
	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, bdev_kobj(bdev));
		del_symlink(bdev->bd_holder_dir, &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}
	mutex_unlock(&bdev->bd_disk->open_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif
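/*
 * Usage sketch (illustrative only): a stacking driver (dm/md style) links
 * each claimed component while assembling and unlinks it on teardown, which
 * maintains the slaves/holders symlinks shown in the example above.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_stack_component(struct block_device *component,
					struct gendisk *stacked_disk)
{
	int err = bd_link_disk_holder(component, stacked_disk);

	if (err)
		return err;
	/* ... component in use; on teardown: ... */
	bd_unlink_disk_holder(component, stacked_disk);
	return 0;
}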
static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}
static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	if (disk->fops->open) {
		ret = disk->fops->open(bdev, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!bdev->bd_openers) {
		set_init_blocksize(bdev);
		if (bdev->bd_bdi == &noop_backing_dev_info)
			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
	}
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	bdev->bd_openers++;
	return 0;
}
static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
	if (!--bdev->bd_openers)
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}
static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	struct block_device *whole;
	int ret;

	if (part->bd_openers)
		goto done;

	whole = bdgrab(disk->part0);
	ret = blkdev_get_whole(whole, mode);
	if (ret)
		goto out_put_whole;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	disk->open_partitions++;
	set_init_blocksize(part);
	if (part->bd_bdi == &noop_backing_dev_info)
		part->bd_bdi = bdi_get(disk->queue->backing_dev_info);
done:
	part->bd_openers++;
	return 0;

out_blkdev_put:
	blkdev_put_whole(whole, mode);
out_put_whole:
	bdput(whole);
	return ret;
}
static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
	struct block_device *whole = bdev_whole(part);

	if (--part->bd_openers)
		return;
	blkdev_flush_mapping(part);
	whole->bd_disk->open_partitions--;
	blkdev_put_whole(whole, mode);
	bdput(whole);
}
struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct gendisk *disk;

	bdev = bdget(dev);
	if (!bdev) {
		blk_request_module(dev);
		bdev = bdget(dev);
		if (!bdev)
			return NULL;
	}

	disk = bdev->bd_disk;
	if (!kobject_get_unless_zero(&disk_to_dev(disk)->kobj))
		goto bdput;
	if ((disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
		goto put_disk;
	if (!try_module_get(bdev->bd_disk->fops->owner))
		goto put_disk;
	return bdev;
put_disk:
	put_disk(disk);
bdput:
	bdput(bdev);
	return NULL;
}
void blkdev_put_no_open(struct block_device *bdev)
{
	module_put(bdev->bd_disk->fops->owner);
	put_disk(bdev->bd_disk);
	bdput(bdev);
}
/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!(disk->flags & GENHD_FL_UP))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto abort_claiming;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
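/*
 * Usage sketch (illustrative only): the usual open/use/close pattern for an
 * exclusive by-path open.  Nested opens must pass the same holder cookie, and
 * blkdev_put() must be called with the same mode.  The path and holder token
 * below are hypothetical.
 */
static int __maybe_unused example_open_by_path(void)
{
	static char example_holder;
	struct block_device *bdev;

	bdev = blkdev_get_by_path("/dev/example0", FMODE_READ | FMODE_EXCL,
				  &example_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... issue I/O against bdev ... */
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	return 0;
}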
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binary needs it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}
void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  open_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock evpoll if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev, mode);
	else
		blkdev_put_whole(bdev, mode);
	mutex_unlock(&disk->open_mutex);

	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));

	blkdev_put(bdev, filp->f_mode);
	return 0;
}
static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	blk_finish_plug(&plug);
	return ret;
}
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret;

	if (pos >= size)
		return 0;

	size -= pos;
	if (iov_iter_count(to) > size) {
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	ret = generic_file_read_iter(iocb, to);
	iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}
static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}
static const struct address_space_operations def_blk_aops = {
	.set_page_dirty	= __set_page_dirty_buffers,
	.readpage	= blkdev_readpage,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = i_size_read(bdev->bd_inode);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		return error;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, 0);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (error)
		return error;

	/*
	 * Invalidate the page cache again; if someone wandered in and dirtied
	 * a page, we just discard it - userspace has no way of knowing whether
	 * the write happened before or after discard completing...
	 */
	return truncate_bdev_range(bdev, file->f_mode, start, end);
}
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= blkdev_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};
/**
 * lookup_bdev - lookup a struct block_device by name
 * @pathname:	special file representing the block device
 * @dev:	return value of the block device's dev_t
 *
 * Look up the dev_t of the block device at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
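/*
 * Usage sketch (illustrative only): resolving a path to a dev_t without
 * opening the device, which is exactly how blkdev_get_by_path() starts.  The
 * helper name is hypothetical.
 */
static int __maybe_unused example_resolve_bdev(const char *path)
{
	dev_t devt;
	int error = lookup_bdev(path, &devt);

	if (error)
		return error;	/* not a block special file, or no access */
	pr_debug("%s is device %u:%u\n", path, MAJOR(devt), MINOR(devt));
	return 0;
}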
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);
void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (bdev->bd_openers)
			func(bdev, arg);
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}