/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

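/*
 * Wait until all outstanding conversions of unwritten extents on this
 * inode have completed, i.e. until i_unwritten drops to zero.
 */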
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
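
/*
 * Worked example (illustrative, not part of the original source): with a
 * 4096-byte block size, blockmask is 0x0fff.  A 4 KiB write at pos 8192
 * from a block-aligned buffer gives (pos | iov_iter_alignment(from)) &
 * blockmask == 0 and is treated as aligned; the same write at pos 8200
 * gives 8200 & 0x0fff == 8, so it is flagged as unaligned and must take
 * the serialized path in ext4_file_write_iter() below.
 */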

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int overwrite = 0;
	ssize_t ret;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (iocb->ki_flags & IOCB_APPEND ||
	     ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

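	/*
	 * Stash the overwrite flag where the direct IO path can see it;
	 * ext4's DIO code reads it back through iocb->private.
	 */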
	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;
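
			/*
			 * Worked example (illustrative): with 4 KiB blocks
			 * (blkbits == 12), pos == 6000 and length == 3000
			 * give m_lblk == 1 and m_len == 2, i.e. the two
			 * blocks covering bytes 4096..12287 of the file.
			 */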

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized.  To exclude unwritten extents we
			 * also need to check m_flags.  There are two
			 * conditions that indicate an initialized extent:
			 * 1) if we hit the extent cache, the
			 * EXT4_MAP_MAPPED flag is returned; 2) if we do a
			 * real lookup, no flags are returned.  So we
			 * should check both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;

out:
	mutex_unlock(&inode->i_mutex);
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, ext4_get_block);
					/* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

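/*
 * DAX mappings can install raw PFNs without a backing struct page
 * alongside ordinary pages, which is why the DAX branch below also sets
 * VM_MIXEDMAP on the VMA.
 */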
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		int ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this way
 * SEEK_DATA/SEEK_HOLE can be implemented for block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree fully tracks the extent status of a file, we will be able to use
 * it directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we have to look
 * up the page cache to check whether there is data in the range
 * [startoff, endoff], because if this range contains an unwritten
 * extent, we treat that extent as data or as a hole according to
 * whether the page cache holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;
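
	/*
	 * Worked example (illustrative, assuming 4 KiB blocks and pages):
	 * an unwritten extent with m_lblk == 5 and m_len == 3 gives
	 * endoff == 8 << 12 == 32768, so the scan below walks page
	 * indexes from startoff >> PAGE_CACHE_SHIFT up to end == 8.
	 */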

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop, or
			 * lastoff has not yet reached endoff, there is a
			 * hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * On the first pass through the loop, if the start offset
		 * is smaller than the offset of the first page found,
		 * there is a hole at the start offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is still within the given
			 * range but this page lies beyond its end, the
			 * rest of the range is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If fewer pages were returned than we asked for, there
		 * is a hole after the last page found.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip over
		 * it.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole depending on whether the
		 * page cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};