// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include "blk.h"
static struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}
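
/*
 * A block device is a contiguous range of sectors, so the buffer_head
 * mapping is a trivial 1:1 translation: block N of the "file" is block N
 * of the device.  Nothing is ever allocated, so the create flag can be
 * ignored.
 */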
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}
#define DIO_INLINE_BIO_VECS 4
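
/*
 * Completion handler for the simple synchronous path: clearing bi_private
 * tells the submitter (sleeping in __blkdev_direct_IO_simple() below) that
 * the bio has finished, after which the waiting task is woken.
 */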
static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}
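
/*
 * Fast path for small synchronous direct I/O: a single bio is built on
 * the stack (with up to DIO_INLINE_BIO_VECS inline vectors, falling back
 * to a kmalloc'ed array), submitted, and waited for inline.  No blkdev_dio
 * allocation and no completion work item are needed.
 */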
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct file *file = iocb->ki_filp;
        struct block_device *bdev = I_BDEV(bdev_file_inode(file));
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;
        blk_qc_t qc;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> 9;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        qc = submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}
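
/*
 * Per-request state for the multi-bio path.  The structure is embedded in
 * front of the first bio (allocated from blkdev_dio_pool below), so one
 * mempool allocation covers both.  For synchronous requests the union
 * holds the waiting task; for AIO it holds the iocb to complete.
 */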
struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        bool                    multi_bio : 1;
        bool                    should_dirty : 1;
        bool                    is_sync : 1;
        struct bio              bio;
};

static struct bio_set blkdev_dio_pool;
static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
        struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
        struct request_queue *q = bdev_get_queue(bdev);

        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
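
/*
 * Completion handler for the multi-bio path.  The first error status seen
 * is latched into the parent dio, and only the final completion (tracked
 * via dio->ref once multi_bio is set) completes the iocb or wakes the
 * synchronous waiter.
 */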
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret, 0);
                        if (dio->multi_bio)
                                bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
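
/*
 * General direct I/O path: the request is split into as many bios as
 * needed, each with up to BIO_MAX_VECS segments.  All bios share one
 * blkdev_dio (embedded in the first bio) that carries the byte count,
 * the reference count and the completion state.
 */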
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);

        dio = container_of(bio, struct blkdev_dio, bio);
        dio->is_sync = is_sync = is_sync_kiocb(iocb);
        if (dio->is_sync) {
                dio->waiter = current;
                bio_get(bio);
        } else {
                dio->iocb = iocb;
        }

        dio->size = 0;
        dio->multi_bio = false;
        dio->should_dirty = is_read && iter_is_iovec(iter);

        /*
         * Don't plug for HIPRI/polled IO, as those should go straight
         * to issue
         */
        if (!is_poll)
                blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> 9;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->should_dirty)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        bool polled = false;

                        if (iocb->ki_flags & IOCB_HIPRI) {
                                bio_set_polled(bio, iocb);
                                polled = true;
                        }

                        qc = submit_bio(bio);

                        if (polled)
                                WRITE_ONCE(iocb->ki_cookie, qc);
                        break;
                }

                if (!dio->multi_bio) {
                        /*
                         * AIO needs an extra reference to ensure the dio
                         * structure which is embedded into the first bio
                         * stays around.
                         */
                        if (!is_sync)
                                bio_get(bio);
                        dio->multi_bio = true;
                        atomic_set(&dio->ref, 2);
                } else {
                        atomic_inc(&dio->ref);
                }

                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        if (!is_poll)
                blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;

                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}
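
/*
 * Entry point for ->direct_IO: small synchronous requests that fit in a
 * single bio take the lightweight on-stack path, everything else goes
 * through the general multi-bio path.
 */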
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
                return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags, struct page **pagep,
                void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}
static int blkdev_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}
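
/*
 * Address space operations for the block device inode: buffered I/O is
 * implemented with buffer_heads via blkdev_get_block, while O_DIRECT
 * bypasses the page cache through blkdev_direct_IO above.
 */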
const struct address_space_operations def_blk_aops = {
        .set_page_dirty     = __set_page_dirty_buffers,
        .readpage           = blkdev_readpage,
        .readahead          = blkdev_readahead,
        .writepage          = blkdev_writepage,
        .write_begin        = blkdev_write_begin,
        .write_end          = blkdev_write_end,
        .writepages         = blkdev_writepages,
        .direct_IO          = blkdev_direct_IO,
        .migratepage        = buffer_migrate_page_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};
/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct inode *bd_inode = bdev_file_inode(filp);
        struct block_device *bdev = I_BDEV(bd_inode);
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}
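
/*
 * Opening a block device file resolves inode->i_rdev to the underlying
 * block_device and repoints f_mapping at the bdev inode's mapping, so
 * every open of the same device shares one page cache.
 */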
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}
static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = I_BDEV(bdev_file_inode(filp));

        blkdev_put(bdev, filp->f_mode);
        return 0;
}
static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
        struct block_device *bdev = I_BDEV(bdev_file_inode(file));
        fmode_t mode = file->f_mode;

        /*
         * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
         * to update it before every ioctl.
         */
        if (file->f_flags & O_NDELAY)
                mode |= FMODE_NDELAY;
        else
                mode &= ~FMODE_NDELAY;

        return blkdev_ioctl(bdev, mode, cmd, arg);
}
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t size = i_size_read(bd_inode);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(I_BDEV(bd_inode)))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);

        return ret;
}
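
/*
 * Reads past the device size return 0 (EOF); reads that straddle the end
 * are clamped by temporarily truncating the iterator, which is re-expanded
 * afterwards so the caller's iov_iter state stays consistent.
 */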
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t size = i_size_read(bd_inode);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret;

        if (pos >= size)
                return 0;

        size -= pos;
        if (iov_iter_count(to) > size) {
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        ret = generic_file_read_iter(iocb, to);
        iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}
#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
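
/*
 * fallocate() on a block device maps the supported modes onto block layer
 * primitives: ZERO_RANGE zeroes without deallocating, PUNCH_HOLE zeroes
 * but must not fall back to writing zeroes by hand, and
 * PUNCH_HOLE|NO_HIDE_STALE issues a plain discard.
 */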
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = i_size_read(bdev->bd_inode);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
                                             GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
                                             GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
                                             GFP_KERNEL, 0);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}
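
/*
 * File operations for opened block device files (/dev/sda and friends).
 * Buffered reads and writes go through the generic page cache helpers;
 * O_DIRECT is routed to blkdev_direct_IO via the address space
 * operations above.
 */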
const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = blkdev_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};
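
/*
 * blkdev_dio_pool supplies the bios for __blkdev_direct_IO.  The
 * front_pad of offsetof(struct blkdev_dio, bio) makes each allocation
 * large enough for the containing blkdev_dio, and the per-cpu bio cache
 * speeds up allocation for polled I/O.
 */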
static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                                offsetof(struct blkdev_dio, bio),
                                BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);