/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
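        /*
         * EXT4_STATE_DA_ALLOC_CLOSE is set for patterns such as
         * replace-via-rename and replace-via-truncate, so that any
         * delayed-allocation blocks are written out when the file is
         * closed instead of staying pending across a crash.
         */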
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
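/*
 * For example, with a 4k block size, two 2k AIO writes at offsets 4096
 * and 6144 both land in block 1 and both test as unaligned, since
 * iov_iter_alignment() folds the segment lengths into the check; each
 * would zero the half of the block it does not cover, so one would
 * clobber the other's data unless they are serialized.
 */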
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct mutex *aio_mutex = NULL;
        struct blk_plug plug;
        int o_direct = io_is_direct(file);
        int overwrite = 0;
        size_t length = iov_iter_count(from);
        ssize_t ret;
        loff_t pos = iocb->ki_pos;

        /*
         * Unaligned direct AIO must be serialized; see the comment above.
         * In the case of O_APPEND, assume that we must always serialize.
         */
        if (o_direct &&
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            (file->f_flags & O_APPEND ||
             ext4_unaligned_aio(inode, from, pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }

        mutex_lock(&inode->i_mutex);
        if (file->f_flags & O_APPEND)
                iocb->ki_pos = pos = i_size_read(inode);

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if ((pos > sbi->s_bitmap_maxbytes) ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
                        mutex_unlock(&inode->i_mutex);
                        ret = -EFBIG;
                        goto errout;
                }

                if (pos + length > sbi->s_bitmap_maxbytes)
                        iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
        }

        iocb->private = &overwrite;
        if (o_direct) {
                blk_start_plug(&plug);

                /* check whether we do a DIO overwrite or not */
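                /*
                 * An "overwrite" is a write that lands entirely in blocks
                 * that are already allocated and initialized and lie inside
                 * i_size, so no block allocation can occur; this is what
                 * lets the direct IO path relax its locking.  The result is
                 * handed down through iocb->private, set above.
                 */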
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                        struct ext4_map_blocks map;
                        unsigned int blkbits = inode->i_blkbits;
                        int err, len;

                        map.m_lblk = pos >> blkbits;
                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                                - map.m_lblk;
                        len = map.m_len;

                        err = ext4_map_blocks(NULL, inode, &map, 0);
                        /*
                         * 'err == len' means that all of the requested
                         * blocks have been preallocated, whether or not they
                         * are initialized.  To exclude unwritten extents we
                         * also check m_flags: an initialized mapping comes
                         * back with EXT4_MAP_MAPPED set, while an unwritten
                         * one is flagged EXT4_MAP_UNWRITTEN, so only the
                         * former is treated as an overwrite.
                         */
                        if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                                overwrite = 1;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
        if (o_direct)
                blk_finish_plug(&plug);

errout:
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_fault(vma, vmf, ext4_get_block);
                                        /* Is this the right get_block? */
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_mkwrite(vma, vmf, ext4_get_block);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .page_mkwrite   = ext4_dax_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
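                /*
                 * DAX maps storage pfns directly, without struct pages,
                 * so the VMA must be marked VM_MIXEDMAP.
                 */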
                vma->vm_flags |= VM_MIXEDMAP;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;

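        /*
         * The 64-byte buffer matches the on-disk size of s_last_mounted;
         * if the mount path does not fit, d_path() returns an error and
         * the path is simply not recorded.
         */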
        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                int ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this
 * lets us implement SEEK_DATA/SEEK_HOLE for block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree tracks all extent state for a file, we will be able to use it
 * directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look
 * up the page cache to check whether there is data in the range
 * [startoff, endoff]: if the range contains an unwritten extent, that
 * extent counts as data or as a hole depending on whether the page
 * cache holds data for it.
 */
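/*
 * Returns 1 and stores the resulting offset in *offset when a matching
 * data/hole offset is found, 0 otherwise.
 */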
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first iteration of the loop and
                         * the offset is not beyond the end offset, there
                         * is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first iteration of the loop and the offset
                 * is smaller than the first page offset, there is a hole at
                 * this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is not beyond the end of
                         * the given range, it is a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were returned than requested, so the rest
                 * of the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
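/*
 * SEEK_DATA semantics follow lseek(2): position at the smallest offset
 * >= @offset that contains data; -ENXIO if @offset is at or beyond EOF.
 */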
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset, treat it
                 * as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, it counts
                 * as data or as a hole depending on whether the page cache
                 * holds data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = (loff_t)last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
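/*
 * SEEK_HOLE semantics follow lseek(2): position at the smallest offset
 * >= @offset that falls in a hole (end of file always counts as a
 * hole); -ENXIO if @offset is at or beyond EOF.
 */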
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset, skip over it.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, it counts
                 * as data or as a hole depending on whether the page cache
                 * holds data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = new_sync_read,
        .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

#ifdef CONFIG_FS_DAX
const struct file_operations ext4_dax_file_operations = {
        .llseek         = ext4_llseek,
        .read           = new_sync_read,
        .write          = new_sync_write,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        /* Splice not yet supported with DAX */
        .fallocate      = ext4_fallocate,
};
#endif

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};