/*
 * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

/* Local headers (the usual set for this file; restored from context). */
#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits.  This is the most we can do for now without overflowing the page
 * cache page index.  Doing it this way means we do not run into problems
 * because of existing too large files.  It would be better to allow the user
 * to read the beginning of the file but I doubt very much anyone is going to
 * hit this check on a 32-bit architecture, so there is no point in adding the
 * extra complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}
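/*
 * For illustration only: on a 32-bit architecture with 4KiB pages, and
 * assuming the usual definition of MAX_LFS_FILESIZE from <linux/fs.h>, the
 * limit checked above works out as:
 *
 *	MAX_LFS_FILESIZE = (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
 *			 = ((loff_t)4096 << 31) - 1
 *			 = 2^43 - 1 bytes, i.e. just under 8TiB
 *
 * Files larger than this cannot be indexed by the 32-bit page cache page
 * index, so open() fails with -EOVERFLOW.
 */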
/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes.  This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all.  This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped.  And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it.  And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all.  For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * Return 0 on success and -errno on error.  In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must
 * be held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	base_ni = ni;
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page.  If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode.  This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system.  This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit.  This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist.  This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed.  Returning error code %i.", err);
	return err;
}
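/*
 * Usage sketch (illustrative only): ntfs_file_buffered_write() below calls
 * this helper when a write begins beyond the current initialized size, e.g.:
 *
 *	if (pos > ni->initialized_size)
 *		err = ntfs_attr_extend_initialized(ni, pos);
 *
 * so that the region between the old initialized size and @pos is zeroed
 * before the new data is copied in.
 */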
/**
 * ntfs_fault_in_pages_readable -
 *
 * Fault a number of userspace pages into pagetables.
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page case
 * elegantly.
 *
 * If you find this difficult to understand, then think of the while loop being
 * the following code, except that we do without the integer variable ret:
 *
 *	do {
 *		ret = __get_user(c, uaddr);
 *		uaddr += PAGE_SIZE;
 *	} while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same page,
 * it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);

	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}
/**
 * ntfs_fault_in_pages_readable_iovec -
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		iov_ofs = 0;
	} while (bytes);
}
/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, add it to lru list.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |
				FGP_ACCESSED);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache_lru(*cached_page, mapping,
					index, GFP_KERNEL);
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}
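/*
 * Usage sketch (illustrative only, variable values made up): grab and lock
 * the pages covering one cluster and release them in reverse order when done:
 *
 *	struct page *pages[4], *cached_page = NULL;
 *	unsigned nr = 4;
 *	int err = __ntfs_grab_cache_pages(mapping, start_idx, nr, pages,
 *			&cached_page);
 *	...
 *	do {
 *		unlock_page(pages[--nr]);
 *		page_cache_release(pages[nr]);
 *	} while (nr > 0);
 *
 * This is the pattern ntfs_file_buffered_write() follows below.
 */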
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}
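/*
 * Note (illustration only): callers collect each buffer head submitted here
 * and later wait for the i/o to complete, roughly:
 *
 *	ntfs_submit_bh_for_read(bh);
 *	*wait_bh++ = bh;
 *	...
 *	while (wait_bh > wait) {
 *		bh = *--wait_bh;
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	}
 *
 * as done in ntfs_prepare_pages_for_non_resident_write() below.
 */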
/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };
	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer.  Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped.  If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate.  If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate.  If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again.  The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it.  On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated.  If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read.  Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user(page, bh_offset(bh),
								blocksize);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out.  If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else.  Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster.  If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user(page, bh_offset(bh), blocksize);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT.  As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds.  We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped.  This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it.  If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate.  If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate.  Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts.  Note, we cannot use the _attr_ version because we
		 * have mapped the mft record.  That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible.  Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record.  This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_segment(page, bh_offset(bh) + ofs,
						bh_offset(bh) + blocksize);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path.  Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well.  Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path.  Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array.  Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path.  Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path.  Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed.  Returning error code %i.", err);
	return err;
}
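/*
 * Worked example of the cluster to block arithmetic used above (numbers are
 * made up for illustration): with 4096 byte clusters (cluster_size_bits = 12)
 * and 512 byte blocks (blocksize_bits = 9), a buffer at byte offset bh_cofs
 * inside logical cluster lcn maps to the device block
 *
 *	bh->b_blocknr = (lcn << (12 - 9)) + (bh_cofs >> 9)
 *
 * e.g. lcn = 100 and bh_cofs = 1536 give block (100 << 3) + 3 = 803.
 */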
/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static inline size_t ntfs_copy_from_user(struct page **pages,
		unsigned nr_pages, unsigned ofs, const char __user *buf,
		size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t total = 0;
	unsigned len;
	int left;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages);
		left = __copy_from_user_inatomic(addr + ofs, buf, len);
		kunmap_atomic(addr);
		if (unlikely(left)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			left = __copy_from_user(addr + ofs, buf, len);
			kunmap(*pages);
			if (unlikely(left))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		buf += len;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += len - left;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user(*pages, 0, len);
	}
	goto out;
}
static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf = iov->iov_base + iov_ofs;
		unsigned len;
		size_t left;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			total -= left;
			break;
		}
		if (!bytes)
			break;
		iov++;
		iov_ofs = 0;
	}
	return total;
}
static inline void ntfs_set_next_iovec(const struct iovec **iovp,
		size_t *iov_ofsp, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		unsigned len;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}
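/*
 * Worked example (values made up for illustration): given two iovecs of
 * lengths 4 and 8 with *iov_ofsp == 0, advancing by bytes == 6 consumes the
 * whole first iovec and two bytes of the second, so ntfs_set_next_iovec()
 * returns with *iovp pointing at the second iovec and *iov_ofsp == 2.
 */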
/*
 * This has the same side-effects and return value as ntfs_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
 * single-segment behaviour.
 *
 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
 * atomic and when not atomic.  This is ok because it calls
 * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
 * fact, the only difference between __copy_from_user_inatomic() and
 * __copy_from_user() is that the latter calls might_sleep() and the former
 * should not zero the tail of the buffer on error.  And on many architectures
 * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
 * makes no difference at all on those architectures.
 */
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
		size_t *iov_ofs, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t copied, len, total = 0;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages);
		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
				*iov, *iov_ofs, len);
		kunmap_atomic(addr);
		if (unlikely(copied != len)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			copied = __ntfs_copy_from_user_iovec_inatomic(addr +
					ofs, *iov, *iov_ofs, len);
			if (unlikely(copied != len))
				goto err_out;
			kunmap(*pages);
		}
		total += len;
		ntfs_set_next_iovec(iov, iov_ofs, len);
		bytes -= len;
		if (!bytes)
			break;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	BUG_ON(copied > len);
	/* Zero the rest of the target like __copy_from_user(). */
	memset(addr + ofs + copied, 0, len - copied);
	kunmap(*pages);
	total += copied;
	ntfs_set_next_iovec(iov, iov_ofs, copied);
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user(*pages, 0, len);
	}
	goto out;
}
static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence the
	 * decrement never happens so the loop never terminates.
	 */
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}
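/*
 * In other words (illustration only), the loop above must not be written as:
 *
 *	do {
 *		flush_dcache_page(pages[--nr_pages]);
 *	} while (nr_pages > 0);
 *
 * because where flush_dcache_page() expands to an empty macro the argument is
 * never evaluated, nr_pages is never decremented, and the loop never ends.
 */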
/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}
/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the
 * inode (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which
 * are locked but not kmap()ped.  The source data has already been copied into
 * the @pages.  ntfs_prepare_pages_for_non_resident_write() has been called
 * before the data was copied (for non-resident attributes only) and it
 * returned success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write.  If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size.  In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes.  For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave its
			 * buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}
static void ntfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ntfs_truncate_vfs(inode);
	}
}
/**
 * ntfs_file_buffered_write -
 *
 * Locking: The vfs is holding ->i_mutex on the inode.
 */
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs,
		loff_t pos, loff_t *ppos, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	char __user *buf = NULL;
	s64 end, ll;
	VCN last_vcn;
	LCN lcn;
	unsigned long flags;
	size_t bytes, iov_ofs = 0;	/* Offset in the current iovec. */
	ssize_t status, written;
	unsigned nr_pages;
	int err;
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"pos 0x%llx, count 0x%lx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos, (unsigned long)count);
	if (unlikely(!count))
		return 0;
	BUG_ON(NInoMstProtected(ni));
	/*
	 * If the attribute is not an index root and it is encrypted or
	 * compressed, we cannot write to it yet.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			/*
			 * Reminder for later: Encrypted files are _always_
			 * non-resident so that the content can always be
			 * encrypted.
			 */
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		if (NInoCompressed(ni)) {
			/* Only unnamed $DATA attribute can be compressed. */
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			/*
			 * Reminder for later: If resident, the data is not
			 * actually compressed.  Only on the switch to non-
			 * resident does compression kick in.  This is in
			 * contrast to encrypted files (see above).
			 */
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not implemented yet.  Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		inode_dio_wait(vi);
		err = ntfs_truncate(vi);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/* The first byte after the write. */
	end = pos + count;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/* Extend the allocation without changing the data size. */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				end = ll;
				count = ll - pos;
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error code %i).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type), err);
				end = ll;
				count = ll - pos;
			} else {
				ntfs_error(vol->sb, "Cannot perform write to "
						"inode 0x%lx, attribute type "
						"0x%x, because extending the "
						"allocation failed (error "
						"code %i).", vi->i_ino,
						(unsigned)
						le32_to_cpu(ni->type), err);
				return err;
			}
		}
	}
	written = 0;
	/*
	 * If the write starts beyond the initialized size, extend it up to the
	 * beginning of the write and initialize all non-sparse space between
	 * the old initialized size and the new one.  This automatically also
	 * increments the vfs inode->i_size to keep it above or equal to the
	 * initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		err = ntfs_attr_extend_initialized(ni, pos);
		if (err < 0) {
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error code %i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			status = err;
			goto err_out;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes.
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
	/* Finally, perform the actual write. */
	last_vcn = -1;
	if (likely(nr_segs == 1))
		buf = iov->iov_base;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_CACHE_SHIFT;
		ofs = pos & ~PAGE_CACHE_MASK;
		bytes = PAGE_CACHE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Get the lcn of the vcn the write is in.  If
				 * it is a hole, need to lock down all pages in
				 * the cluster.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					status = -EIO;
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the attribute "
							"is corrupt.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
					break;
				}
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_CACHE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > count)
			bytes = count;
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the same
		 * page(s) as we are writing to, without it/them being marked
		 * up-to-date.  Note, at present there is nothing to stop the
		 * pages being swapped out between us bringing them into memory
		 * and doing the actual copying.
		 */
		if (likely(nr_segs == 1))
			ntfs_fault_in_pages_readable(buf, bytes);
		else
			ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped.  We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				loff_t i_size;

				do {
					unlock_page(pages[--do_pages]);
					page_cache_release(pages[do_pages]);
				} while (do_pages);
				/*
				 * The write preparation may have instantiated
				 * allocated space outside i_size.  Trim this
				 * off again.  We can ignore any errors in this
				 * case as we will just be wasting a bit of
				 * allocated space, which is not a disaster.
				 */
				i_size = i_size_read(vi);
				if (pos + bytes > i_size)
					ntfs_write_failed(mapping, pos + bytes);
				break;
			}
		}
		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
		if (likely(nr_segs == 1)) {
			copied = ntfs_copy_from_user(pages + u, do_pages - u,
					ofs, buf, bytes);
			buf += copied;
		} else
			copied = ntfs_copy_from_user_iovec(pages + u,
					do_pages - u, ofs, &iov, &iov_ofs,
					bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = ntfs_commit_pages_after_write(pages, do_pages, pos,
				bytes);
		if (likely(!status)) {
			written += copied;
			count -= copied;
			pos += copied;
			if (unlikely(copied != bytes))
				status = -EFAULT;
		}
		do {
			unlock_page(pages[--do_pages]);
			page_cache_release(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status))
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
err_out:
	*ppos = pos;
	if (cached_page)
		page_cache_release(cached_page);
	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}
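/*
 * Summary of one iteration of the write loop above (for orientation only):
 *
 *	ntfs_fault_in_pages_readable(buf, bytes);	(fault in the source)
 *	__ntfs_grab_cache_pages(mapping, start_idx,
 *			do_pages, pages, &cached_page);	(lock destination)
 *	ntfs_prepare_pages_for_non_resident_write();	(map/allocate space)
 *	ntfs_copy_from_user(...);			(copy the data)
 *	ntfs_flush_dcache_pages(...);
 *	ntfs_commit_pages_after_write(...);		(dirty buffers, sizes)
 *	unlock_page() and page_cache_release() on all pages
 *	balance_dirty_pages_ratelimited(mapping);	(throttle the writer)
 *
 * Each step is implemented by a helper defined earlier in this file.
 */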
/**
 * ntfs_file_aio_write_nolock -
 */
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos;
	size_t count;		/* after file limit checks */
	ssize_t written, err;

	count = iov_length(iov, nr_segs);
	pos = *ppos;
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (!count)
		goto out;
	err = file_remove_suid(file);
	if (err)
		goto out;
	err = file_update_time(file);
	if (err)
		goto out;
	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
			count);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/**
 * ntfs_file_aio_write -
 */
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);
	if (ret > 0) {
		int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
/**
 * ntfs_file_fsync - sync a file to disk
 * @filp:	file to be synced
 * @datasync:	if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
 * system calls.  This function is inspired by fs/buffer.c::file_fsync().
 *
 * If @datasync is false, write the mft record and all associated extent mft
 * records as well as the $DATA attribute and then sync the block device.
 *
 * If @datasync is true and the attribute is non-resident, we skip the writing
 * of the mft record and all associated extent mft records (this might still
 * happen due to the write_inode_now() call).
 *
 * Also, if @datasync is true, we do not wait on the inode to be written out
 * but we always wait on the page cache pages to be written out.
 *
 * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
 * this problem for now.
 */
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct inode *vi = filp->f_mapping->host;
	int err, ret = 0;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);

	err = filemap_write_and_wait_range(vi->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&vi->i_mutex);

	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = __ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below to be
	 * sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
	mutex_unlock(&vi->i_mutex);
	return ret;
}
#endif /* NTFS_RW */

const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,	  /* Seek inside file. */
	.read		= new_sync_read,	  /* Read from file. */
	.read_iter	= generic_file_read_iter, /* Async read from file. */
#ifdef NTFS_RW
	.write		= do_sync_write,	  /* Write to file. */
	.aio_write	= ntfs_file_aio_write,	  /* Async write to file. */
	/*.release	= ,*/			  /* Last file is closed.  See
						     fs/ext2/file.c::
						     ext2_release_file() for
						     how to use this to discard
						     preallocated space for
						     write opened files. */
	.fsync		= ntfs_file_fsync,	  /* Sync a file to disk. */
	/*.aio_fsync	= ,*/			  /* Sync all outstanding async
						     i/o operations on a
						     kiocb. */
#endif /* NTFS_RW */
	/*.ioctl	= ,*/			  /* Perform function on the
						     mounted filesystem. */
	.mmap		= generic_file_mmap,	  /* Mmap file. */
	.open		= ntfs_file_open,	  /* Open file. */
	.splice_read	= generic_file_splice_read /* Zero-copy data send with
						     the data source being on
						     the ntfs partition.  We do
						     not need to care about the
						     data destination. */
	/*.sendpage	= ,*/			  /* Zero-copy data send with
						     the data destination being
						     on the ntfs partition.  We
						     do not need to care about
						     the data source. */
};

const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};