// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

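/*
 * A block device is mapped 1:1 onto itself: block N of the "file" is
 * block N of the device.  blkdev_get_block() therefore never has to
 * allocate anything and can unconditionally fill in the device and
 * block number.
 */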
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

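/*
 * Direct I/O to a block device has to be aligned to the logical block
 * size of the device: both the starting offset and the memory segments
 * of the iterator are validated before any bio is built.
 */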
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter)
{
	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

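/*
 * Fast path for small synchronous requests: the bio and, for up to
 * DIO_INLINE_BIO_VECS segments, its bio_vecs live on the stack, and
 * completion is awaited inline with submit_bio_wait(), so no dio
 * structure or end_io callback is needed.
 */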
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

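/*
 * The dio is allocated in front of the first bio of a request from
 * blkdev_dio_pool, which is why the embedded bio must stay the last
 * member: container_of() on that bio recovers the dio on completion.
 */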
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

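/*
 * Multi-bio path for requests that do not fit in a single bio.  Every
 * bio holds a reference on the dio and the submitter holds an extra
 * one, so the last completing bio either finishes the kiocb (async)
 * or wakes the waiting task (sync).
 */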
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

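/*
 * Completion handler for the single-bio async path below: with exactly
 * one bio in flight there is no reference counting to do, and the bio
 * completing ends the whole kiocb.
 */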
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

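/*
 * Single-bio async fast path.  The caller guarantees that the request
 * fits into one bio (nr_pages <= BIO_MAX_VECS), so the dio needs no
 * reference count and the bio is submitted exactly once.
 */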
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

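/*
 * Dispatch: requests that fit into a single bio take the inline
 * synchronous path or the single-bio async path; anything larger falls
 * back to the reference-counted multi-bio loop above.
 */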
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.direct_IO	= blkdev_direct_IO,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = filp->private_data;
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filp->private_data = bdev;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = filp->private_data;

	blkdev_put(bdev, filp->f_mode);
	return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	struct blk_plug plug;
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	blk_finish_plug(&plug);
	return ret;
}

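/*
 * blkdev_read_iter() uses the same shorten/reexpand trick as the write
 * path so that a read crossing the end of the device returns a short
 * count instead of an error.  With IOCB_DIRECT the direct path is
 * tried first and filemap_read() picks up any remainder.
 */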
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = iocb->ki_filp->private_data;
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = iocb->ki_filp->f_mapping;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, pos,
							  pos + count - 1)) {
				ret = -EAGAIN;
				goto reexpand;
			}
		} else {
			ret = filemap_write_and_wait_range(mapping, pos,
							   pos + count - 1);
			if (ret < 0)
				goto reexpand;
		}

		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

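/*
 * The supported modes map directly onto block layer primitives:
 * zeroing a range uses blkdev_issue_zeroout() with NOUNMAP, punching a
 * hole uses zeroout with NOFALLBACK (deallocate, never write zeroes),
 * and punching a hole with NO_HIDE_STALE issues a plain discard.
 */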
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		goto fail;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

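/*
 * From userspace blkdev_fallocate() is reached through e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 * on an open block device; note that the punch-hole variants must
 * include FALLOC_FL_KEEP_SIZE, as a block device cannot be resized
 * here.
 */
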
const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);