/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock_shared(inode);
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fallback to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
            (atomic_read(&inode->i_writecount) == 1) &&
            !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

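/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed, so that a subsequent unaligned AIO cannot race with a
 * conversion that is still in flight.
 */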
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
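 *
 * For example, with a 4096-byte block size (blockmask == 0xfff), a write of
 * one full block at pos 4096 is block-aligned, while the same write at pos
 * 4100 is not: (4100 & 0xfff) != 0, so the IO covers partial blocks and must
 * be serialized.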
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been allocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

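/*
 * Run the generic write checks and, for bitmap-mapped (non-extent) files,
 * clamp the write so it does not extend past s_bitmap_maxbytes. Returns
 * the number of bytes that may be written, 0, or a negative error.
 */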
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;
        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
        bool overwrite = false;

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

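        /*
         * A pure overwrite of already allocated and initialized blocks needs
         * no further allocation, so it is safe to downgrade to the shared
         * lock and let such writes proceed concurrently.
         */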
        if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                overwrite = true;
                downgrade_write(&inode->i_rwsem);
        }
        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        if (!overwrite)
                inode_unlock(inode);
        else
                inode_unlock_shared(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif

        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIOs must be serialized against each other, as
         * zeroing of partial blocks by two competing unaligned AIOs can
         * result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

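        /*
         * The ext4 direct IO code reads this flag back through iocb->private
         * to decide whether the write is a pure overwrite that can be issued
         * without allocating blocks or converting unwritten extents.
         */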
        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
            ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
                overwrite = 1;

        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

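        /*
         * A write fault may need to allocate blocks, so take i_mmap_sem to
         * keep truncate away and open a journal handle for the allocation.
         */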
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
        } else
                down_read(&EXT4_I(inode)->i_mmap_sem);

        if (IS_ERR(handle))
                result = VM_FAULT_SIGBUS;
        else
                result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);

        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else
                up_read(&EXT4_I(inode)->i_mmap_sem);

        return result;
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                              pmd_t *pmd, unsigned int flags)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;

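        /*
         * As in ext4_dax_fault(), but reserve enough journal credits to map
         * a whole PMD-sized extent.
         */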
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                ext4_chunk_trans_blocks(inode,
                                                        PMD_SIZE / PAGE_SIZE));
        } else
                down_read(&EXT4_I(inode)->i_mmap_sem);

        if (IS_ERR(handle))
                result = VM_FAULT_SIGBUS;
        else {
                result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
                                             &ext4_iomap_ops);
        }

        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else
                up_read(&EXT4_I(inode)->i_mmap_sem);

        return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, and so if it included
 * part of the file we are working on, our pte will get unmapped and the
 * check for pte_same() in wp_pfn_shared() fails. Thus the fault gets retried
 * and things work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        loff_t size;
        int ret;

        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        else
                ret = dax_pfn_mkwrite(vma, vmf);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(sb);

        return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .pmd_fault      = ext4_dax_pmd_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (ext4_encrypted_inode(inode)) {
                int err = fscrypt_get_encryption_info(inode);
                if (err)
                        return 0;
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct dentry *dir;
        struct path path;
        char buf[64], *cp;
        int ret;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        if (ext4_encrypted_inode(inode)) {
                ret = fscrypt_get_encryption_info(inode);
                if (ret)
                        return -EACCES;
                if (!fscrypt_has_encryption_key(inode))
                        return -ENOKEY;
        }

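        /*
         * An encrypted directory and the files in it must have consistent
         * encryption contexts; refuse the open when they do not match.
         */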
        dir = dget_parent(file_dentry(filp));
        if (ext4_encrypted_inode(d_inode(dir)) &&
            !fscrypt_has_permitted_context(d_inode(dir), inode)) {
                ext4_warning(inode->i_sb,
                             "Inconsistent encryption contexts: %lu/%lu",
                             (unsigned long) d_inode(dir)->i_ino,
                             (unsigned long) inode->i_ino);
                dput(dir);
                return -EPERM;
        }
        dput(dir);
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can handle
 * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
 * same function. Once the extent status tree has been fully implemented, it
 * will track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we have to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we treat the extent as data or as a hole depending on whether the page
 * cache has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     ext4_lblk_t end_blk,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)end_blk << blkbits;

        index = startoff >> PAGE_SHIFT;
        end = endoff >> PAGE_SHIFT;

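        /* Scan the page cache in pagevec-sized chunks over [index, end]. */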
        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop, or the
                         * last offset we saw is still before the end offset,
                         * there is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first pass through the loop and the offset
                 * is smaller than the first page offset, there is a hole at
                 * this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the page index is beyond the range we are
                         * scanning while the current offset is still inside
                         * it, there is a hole at the current offset.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were returned than requested, so there must
                 * be a hole after the last page found.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret <= 0) {
                        /* No extent found -> no data */
                        if (ret == 0)
                                ret = -ENXIO;
                        inode_unlock(inode);
                        return ret;
                }

                last = es.es_lblk;
                if (last != start)
                        dataoff = (loff_t)last << blkbits;
                if (!ext4_es_is_unwritten(&es))
                        break;

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                              es.es_lblk + es.es_len, &dataoff))
                        break;
                last += es.es_len;
                dataoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        if (offset >= isize) {
                inode_unlock(inode);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
                if (ret < 0) {
                        inode_unlock(inode);
                        return ret;
                }
                /* Found a hole? */
                if (ret == 0 || es.es_lblk > last) {
                        if (last != start)
                                holeoff = (loff_t)last << blkbits;
                        break;
                }
                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (ext4_es_is_unwritten(&es) &&
                    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                              last + es.es_len, &holeoff))
                        break;

                last += es.es_len;
                holeoff = (loff_t)last << blkbits;
                cond_resched();
        } while (last <= end);

        inode_unlock(inode);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};