// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
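/*
 * Read from a DAX inode via the iomap infrastructure. The shared inode
 * lock is taken with a trylock first so that IOCB_NOWAIT callers can bail
 * out with -EAGAIN instead of blocking; IS_DAX is rechecked under the
 * lock, falling back to buffered reads if the inode is no longer DAX.
 */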
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock_shared(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock_shared(inode);
	}
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

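/*
 * Top-level ->read_iter() entry point: refuse IO on a shut-down filesystem,
 * short-circuit zero-length reads so atime is not touched, and dispatch to
 * the DAX read path or the generic buffered/direct path as appropriate.
 */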
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

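/*
 * Wait until all outstanding unwritten-extent conversions on the inode
 * have completed, i.e. until the i_unwritten count drops to zero.
 */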
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

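/*
 * Perform the generic write checks and, for bitmap-mapped (non-extent)
 * files, additionally clamp the write against s_bitmap_maxbytes.
 */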
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
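/*
 * Write to a DAX inode. The exclusive inode lock is taken (again with a
 * trylock first for IOCB_NOWAIT), privileges are dropped and timestamps
 * updated, then the write is carried out through dax_iomap_rw();
 * generic_write_sync() handles O_(D)SYNC semantics afterwards.
 */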
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

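/*
 * Top-level ->write_iter() entry point: dispatches to the DAX path where
 * applicable, serializes unaligned direct AIO against pending unwritten
 * extent conversion, and flags pure overwrites of allocated, initialized
 * blocks (via iocb->private) so the DIO path can use relaxed locking
 * under dioread_nolock.
 */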
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif
	if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
		return -EOPNOTSUPP;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && !unaligned_aio) {
		if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
			if (ext4_should_dioread_nolock(inode))
				overwrite = 1;
		} else if (iocb->ki_flags & IOCB_NOWAIT) {
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
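/*
 * Handle a page fault on a DAX mapping. For shared writable faults a
 * journal handle is started (with page faults blocked via
 * sb_start_pagefault()) before calling into dax_iomap_fault(), so that
 * any block allocation done by the fault runs under a transaction.
 */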
static int ext4_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;

	/*
	 * We have to distinguish real writes from writes which will result in a
	 * COW page; COW writes should *not* poke the journal (the file will not
	 * be changed). Doing so would cause unintended failures when mounted
	 * read-only.
	 *
	 * We check for VM_SHARED rather than vmf->cow_page since the latter is
	 * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
	 * other sizes, dax_iomap_fault will handle splitting / fallback so that
	 * we eventually come back with a COW page.
	 */
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		(vmf->vma->vm_flags & VM_SHARED);

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
					       EXT4_DATA_TRANS_BLOCKS(sb));
	} else {
		down_read(&EXT4_I(inode)->i_mmap_sem);
	}
	if (!IS_ERR(handle))
		result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
	else
		result = VM_FAULT_SIGBUS;
	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else {
		up_read(&EXT4_I(inode)->i_mmap_sem);
	}

	return result;
}

static int ext4_dax_fault(struct vm_fault *vmf)
{
	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_huge_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

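/*
 * Set up a memory mapping: DAX inodes get the DAX vm_ops (and are marked
 * VM_MIXEDMAP | VM_HUGEPAGE so huge faults can be attempted); everything
 * else uses the regular page-cache-backed fault handlers.
 */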
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

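/*
 * Called on every open of a regular file. Records the mount point in the
 * superblock on the first read-write open, verifies encryption keys and
 * contexts, attaches the jbd2 inode for writers, and enables IOCB_NOWAIT
 * support.
 */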
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !sb_rdonly(sb))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}

	filp->f_mode |= FMODE_NOWAIT;
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because this lets us implement
 * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
 * same function. Once the extent status tree has been fully implemented,
 * it will track all extent status for a file and we can use it directly
 * to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is data between [startoff, endoff],
 * because if this range contains an unwritten extent, we treat that extent
 * as data or as a hole depending on whether the page cache holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = (endoff - 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i;
		unsigned long nr_pages;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
						&index, end);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If current offset is smaller than the page offset,
			 * there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (lastoff + bh->b_size <= startoff)
					goto next;
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
next:
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		pagevec_release(&pvec);
	} while (index <= end);

	/* There are no pages up to endoff - that would be a hole in there. */
	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

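/*
 * Operation tables wiring the handlers above into the VFS for regular
 * ext4 files.
 */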
const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_file_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};