/*
 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2007 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we do not run into problems
 * because of existing files that are too large. It would be better to allow
 * the user to read the beginning of the file but I doubt very much anyone is
 * going to hit this check on a 32-bit architecture, so there is no point in
 * adding the extra complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}
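
/*
 * For illustration: assuming the usual 32-bit definition of
 * MAX_LFS_FILESIZE as (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
 * and 4 KiB pages, the cutoff enforced above is (4096ULL << 31) - 1, i.e.
 * 2^43 - 1 bytes, or just under 8 TiB. On 64-bit, sizeof(unsigned long)
 * is 8 so the compiler can discard the whole branch at compile time.
 */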

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 * @cached_page:	store any allocated but unused page here
 * @lru_pvec:		lru-buffering pagevec of the caller
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes. This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all. This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped. And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it. And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all. For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * @cached_page and @lru_pvec are just optimizations for dealing with multiple
 * pages.
 *
 * Return 0 on success and -errno on error. In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must
 * be held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
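	/*
	 * Worked example of the index arithmetic above, assuming 4 KiB pages
	 * (PAGE_CACHE_SHIFT == 12): extending old_init_size 0x1800 to
	 * new_init_size 0x5000 yields index = 1 and end_index = 5, so the
	 * loop below reads and dirties pages 1 through 4.
	 */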
	do {
		/*
		 * Read the page. If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode. This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system. This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or, as in the above example, more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages we could optimize this workload by
		 * using the FsMisc / MiscFs page bit as a "PageIsSparse" bit.
		 * This would be set in readpage for sparse pages and here we
		 * would not need to mark dirty any pages which have this bit
		 * set. The only caveat is that we have to clear the bit
		 * everywhere where we allocate any clusters that lie in the
		 * page or that contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist. This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}
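
/*
 * Usage sketch (values invented for illustration): a write at position
 * 0x100000 into an attribute whose initialized_size is only 0x1000 would
 * first do ntfs_attr_extend_initialized(ni, 0x100000, &cached_page,
 * &lru_pvec) so that the 0x1000..0x100000 gap reads back as zeroes before
 * any new data is committed beyond it.
 */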

/**
 * ntfs_fault_in_pages_readable - fault in a range of userspace pages
 *
 * Fault a number of userspace pages into pagetables.
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page case
 * elegantly.
 *
 * If you find this difficult to understand, then think of the while loop being
 * the following code, except that we do without the integer variable ret:
 *
 *	do {
 *		ret = __get_user(c, uaddr);
 *		uaddr += PAGE_SIZE;
 *	} while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same page,
 * it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);

	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}
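
/*
 * Example: a call ntfs_fault_in_pages_readable(uaddr, 8192) with @uaddr in
 * the middle of a page and 4 KiB pages sets @end three page boundaries up,
 * so the loop performs three reads, one per page. Pre-faulting like this
 * before any page locks are taken helps the copy loops further down
 * succeed with the atomic (non-faulting) copy variants on the first
 * attempt.
 */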

/**
 * ntfs_fault_in_pages_readable_iovec - fault in userspace pages from an iovec
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		iov_ofs = 0;
	} while (bytes);
}

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 * @lru_pvec:	lru-buffering pagevec of caller
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec @lru_pvec.
 *
 * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
 * are obtained at once instead of just one page and that 0 is returned on
 * success and -errno on error.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_lock_page(mapping, index);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache(*cached_page, mapping, index,
					GFP_KERNEL);
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			page_cache_get(*cached_page);
			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
				__pagevec_lru_add_file(lru_pvec);
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}
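
/*
 * A sketch of the intended calling pattern (hypothetical local names),
 * similar to what ntfs_file_buffered_write() does further down: work out
 * the page range covering the write, then grab all of those pages in one
 * call:
 *
 *	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
 *	unsigned nr = ((pos + bytes + PAGE_CACHE_SIZE - 1) >>
 *			PAGE_CACHE_SHIFT) - start;
 *	err = __ntfs_grab_cache_pages(mapping, start, nr, pages,
 *			&cached_page, &lru_pvec);
 */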

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}
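
/*
 * Note that submit_bh() only queues the read; it does not wait for it to
 * complete. Callers are expected to collect the buffer head and
 * wait_on_buffer() it later, as the wait[]/wait_bh handling in
 * ntfs_prepare_pages_for_non_resident_write() below does.
 */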

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host). There are
 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
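	/*
	 * The status bitfield below tracks how far we have gotten, so that
	 * the single error code path at the bottom of this function knows
	 * exactly which steps (runlist merge, mft record mapping, mapping
	 * pairs rebuild, attribute extent switch) it has to undo.
	 */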
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
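	/*
	 * Worked example of the cluster arithmetic above, assuming 4 KiB
	 * clusters (cluster_size_bits == 12): a write of 0x1000 bytes at
	 * pos 0x3400 yields cpos = 3, end = 0x4400 and cend = 5, i.e. the
	 * write touches clusters 3 and 4.
	 */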
	/*
	 * Loop over each page and for each page over each buffer. Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped. If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate. If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate. If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer. Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again. The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer. If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it. On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated. If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read. Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user(page, bh_offset(bh),
								blocksize);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out. If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else. Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page, KM_USER0);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster. If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user(page, bh_offset(bh), blocksize);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT. As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds. We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped. This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it. If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate. If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate. Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts. Note, we cannot use the _attr_ version because we
		 * have mapped the mft record. That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible. Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record. This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_segment(page, bh_offset(bh) + ofs,
						blocksize);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path. Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well. Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path. Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path. Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array. Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path. Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path. Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
	return err;
}

/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied. If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static inline size_t ntfs_copy_from_user(struct page **pages,
		unsigned nr_pages, unsigned ofs, const char __user *buf,
		size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t total = 0;
	unsigned len;
	int left;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages, KM_USER0);
		left = __copy_from_user_inatomic(addr + ofs, buf, len);
		kunmap_atomic(addr, KM_USER0);
		if (unlikely(left)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			left = __copy_from_user(addr + ofs, buf, len);
			kunmap(*pages);
			if (unlikely(left))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		buf += len;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += len - left;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user(*pages, 0, len);
	}
	goto out;
}
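
/*
 * Behaviour sketch with made-up numbers: with nr_pages == 2, ofs == 0 and
 * bytes == 0x2000, a fault after 0x1000 bytes returns 0x1000 and leaves
 * everything from the fault point out to ofs + bytes reading as zeroes
 * (partly courtesy of __copy_from_user(), partly via the err_out loop
 * above), so a caller can safely commit just the reported number of bytes.
 */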

static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf = iov->iov_base + iov_ofs;
		unsigned len;
		size_t left;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			total -= left;
			break;
		}
		if (!bytes)
			break;
		iov++;
		iov_ofs = 0;
	}
	return total;
}

static inline void ntfs_set_next_iovec(const struct iovec **iovp,
		size_t *iov_ofsp, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		unsigned len;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}
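
/*
 * Worked example: with iov == {{base0, 100}, {base1, 200}}, *iov_ofsp == 80
 * and bytes == 50, the first segment contributes its remaining 20 bytes,
 * and the function returns with *iovp advanced to the second segment and
 * *iov_ofsp == 30.
 */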

/*
 * This has the same side-effects and return value as ntfs_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
 * single-segment behaviour.
 *
 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
 * when atomic and when not atomic. This is ok because
 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
 * and it is ok to call this when non-atomic.
 * In fact, the only difference between __copy_from_user_inatomic() and
 * __copy_from_user() is that the latter calls might_sleep() and the former
 * should not zero the tail of the buffer on error. And on many
 * architectures __copy_from_user_inatomic() is just defined to
 * __copy_from_user() so it makes no difference at all on those architectures.
 */
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
		size_t *iov_ofs, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *addr;
	size_t copied, len, total = 0;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		addr = kmap_atomic(*pages, KM_USER0);
		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
				*iov, *iov_ofs, len);
		kunmap_atomic(addr, KM_USER0);
		if (unlikely(copied != len)) {
			/* Do it the slow way. */
			addr = kmap(*pages);
			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
					*iov, *iov_ofs, len);
			/*
			 * Zero the rest of the target like __copy_from_user().
			 */
			memset(addr + ofs + copied, 0, len - copied);
			kunmap(*pages);
			if (unlikely(copied != len))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		ntfs_set_next_iovec(iov, iov_ofs, len);
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += copied;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user(*pages, 0, len);
	}
	goto out;
}

static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence the
	 * decrement never happens so the loop never terminates.
	 */
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}

/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}

/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
 * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
 * locked but not kmap()ped. The source data has already been copied into the
 * @pages. ntfs_prepare_pages_for_non_resident_write() has been called before
 * the data was copied (for non-resident attributes only) and it returned
 * success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write. If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size. In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes. For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
1637 static int ntfs_commit_pages_after_write(struct page **pages,
1638 const unsigned nr_pages, s64 pos, size_t bytes)
1639 {
1640 s64 end, initialized_size;
1641 loff_t i_size;
1642 struct inode *vi;
1643 ntfs_inode *ni, *base_ni;
1644 struct page *page;
1645 ntfs_attr_search_ctx *ctx;
1646 MFT_RECORD *m;
1647 ATTR_RECORD *a;
1648 char *kattr, *kaddr;
1649 unsigned long flags;
1650 u32 attr_len;
1651 int err;
1652
1653 BUG_ON(!nr_pages);
1654 BUG_ON(!pages);
1655 page = pages[0];
1656 BUG_ON(!page);
1657 vi = page->mapping->host;
1658 ni = NTFS_I(vi);
1659 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
1660 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
1661 vi->i_ino, ni->type, page->index, nr_pages,
1662 (long long)pos, bytes);
1663 if (NInoNonResident(ni))
1664 return ntfs_commit_pages_after_non_resident_write(pages,
1665 nr_pages, pos, bytes);
1666 BUG_ON(nr_pages > 1);
1667 /*
1668 * Attribute is resident, implying it is not compressed, encrypted, or
1669 * sparse.
1670 */
1671 if (!NInoAttr(ni))
1672 base_ni = ni;
1673 else
1674 base_ni = ni->ext.base_ntfs_ino;
1675 BUG_ON(NInoNonResident(ni));
1676 /* Map, pin, and lock the mft record. */
1677 m = map_mft_record(base_ni);
1678 if (IS_ERR(m)) {
1679 err = PTR_ERR(m);
1680 m = NULL;
1681 ctx = NULL;
1682 goto err_out;
1683 }
1684 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1685 if (unlikely(!ctx)) {
1686 err = -ENOMEM;
1687 goto err_out;
1688 }
1689 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1690 CASE_SENSITIVE, 0, NULL, 0, ctx);
1691 if (unlikely(err)) {
1692 if (err == -ENOENT)
1693 err = -EIO;
1694 goto err_out;
1695 }
1696 a = ctx->attr;
1697 BUG_ON(a->non_resident);
1698 /* The total length of the attribute value. */
1699 attr_len = le32_to_cpu(a->data.resident.value_length);
1700 i_size = i_size_read(vi);
1701 BUG_ON(attr_len != i_size);
1702 BUG_ON(pos > attr_len);
1703 end = pos + bytes;
1704 BUG_ON(end > le32_to_cpu(a->length) -
1705 le16_to_cpu(a->data.resident.value_offset));
1706 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
1707 kaddr = kmap_atomic(page, KM_USER0);
1708 /* Copy the received data from the page to the mft record. */
1709 memcpy(kattr + pos, kaddr + pos, bytes);
1710 /* Update the attribute length if necessary. */
1711 if (end > attr_len) {
1712 attr_len = end;
1713 a->data.resident.value_length = cpu_to_le32(attr_len);
1714 }
1715 /*
1716 * If the page is not uptodate, bring the out of bounds area(s)
1717 * uptodate by copying data from the mft record to the page.
1718 */
1719 if (!PageUptodate(page)) {
1720 if (pos > 0)
1721 memcpy(kaddr, kattr, pos);
1722 if (end < attr_len)
1723 memcpy(kaddr + end, kattr + end, attr_len - end);
1724 /* Zero the region outside the end of the attribute value. */
1725 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1726 flush_dcache_page(page);
1727 SetPageUptodate(page);
1728 }
1729 kunmap_atomic(kaddr, KM_USER0);
1730 /* Update initialized_size/i_size if necessary. */
1731 read_lock_irqsave(&ni->size_lock, flags);
1732 initialized_size = ni->initialized_size;
1733 BUG_ON(end > ni->allocated_size);
1734 read_unlock_irqrestore(&ni->size_lock, flags);
1735 BUG_ON(initialized_size != i_size);
1736 if (end > initialized_size) {
1737 write_lock_irqsave(&ni->size_lock, flags);
1738 ni->initialized_size = end;
1739 i_size_write(vi, end);
1740 write_unlock_irqrestore(&ni->size_lock, flags);
1741 }
1742 /* Mark the mft record dirty, so it gets written back. */
1743 flush_dcache_mft_record_page(ctx->ntfs_ino);
1744 mark_mft_record_dirty(ctx->ntfs_ino);
1745 ntfs_attr_put_search_ctx(ctx);
1746 unmap_mft_record(base_ni);
1747 ntfs_debug("Done.");
1748 return 0;
1749 err_out:
1750 if (err == -ENOMEM) {
1751 ntfs_warning(vi->i_sb, "Error allocating memory required to "
1752 "commit the write.");
1753 if (PageUptodate(page)) {
1754 ntfs_warning(vi->i_sb, "Page is uptodate, setting "
1755 "dirty so the write will be retried "
1756 "later on by the VM.");
1757 /*
1758 * Put the page on mapping->dirty_pages, but leave its
1759 * buffers' dirty state as-is.
1760 */
1761 __set_page_dirty_nobuffers(page);
1762 err = 0;
1763 } else
1764 ntfs_error(vi->i_sb, "Page is not uptodate. Written "
1765 "data has been lost.");
1766 } else {
1767 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
1768 "with error %i.", err);
1769 NVolSetErrors(ni->vol);
1770 }
1771 if (ctx)
1772 ntfs_attr_put_search_ctx(ctx);
1773 if (m)
1774 unmap_mft_record(base_ni);
1775 return err;
1776 }
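/*
 * For reference, a minimal sketch (illustrative only; this helper is not
 * part of the driver and its name is made up) of the space the bounds
 * check in ntfs_commit_pages_after_write() above enforces: a resident
 * value, old or extended, must fit between its value_offset and the end
 * of the attribute record.
 */
static inline u32 ntfs_resident_value_room(const ATTR_RECORD *a)
{
	/* Bytes available for the value inside the attribute record. */
	return le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset);
}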
1777
1778 /**
1779  * ntfs_file_buffered_write - write data to a file via the page cache
1780 *
1781 * Locking: The vfs is holding ->i_mutex on the inode.
1782 */
1783 static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
1784 const struct iovec *iov, unsigned long nr_segs,
1785 loff_t pos, loff_t *ppos, size_t count)
1786 {
1787 struct file *file = iocb->ki_filp;
1788 struct address_space *mapping = file->f_mapping;
1789 struct inode *vi = mapping->host;
1790 ntfs_inode *ni = NTFS_I(vi);
1791 ntfs_volume *vol = ni->vol;
1792 struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
1793 struct page *cached_page = NULL;
1794 char __user *buf = NULL;
1795 s64 end, ll;
1796 VCN last_vcn;
1797 LCN lcn;
1798 unsigned long flags;
1799 size_t bytes, iov_ofs = 0; /* Offset in the current iovec. */
1800 ssize_t status, written;
1801 unsigned nr_pages;
1802 int err;
1803 struct pagevec lru_pvec;
1804
1805 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1806 "pos 0x%llx, count 0x%lx.",
1807 vi->i_ino, (unsigned)le32_to_cpu(ni->type),
1808 (unsigned long long)pos, (unsigned long)count);
1809 if (unlikely(!count))
1810 return 0;
1811 BUG_ON(NInoMstProtected(ni));
1812 /*
1813 * If the attribute is not an index root and it is encrypted or
1814 * compressed, we cannot write to it yet. Note we need to check for
1815 * AT_INDEX_ALLOCATION since this is the type of both directory and
1816 * index inodes.
1817 */
1818 if (ni->type != AT_INDEX_ALLOCATION) {
1819 /* If file is encrypted, deny access, just like NT4. */
1820 if (NInoEncrypted(ni)) {
1821 /*
1822 * Reminder for later: Encrypted files are _always_
1823 * non-resident so that the content can always be
1824 * encrypted.
1825 */
1826 ntfs_debug("Denying write access to encrypted file.");
1827 return -EACCES;
1828 }
1829 if (NInoCompressed(ni)) {
1830 /* Only unnamed $DATA attribute can be compressed. */
1831 BUG_ON(ni->type != AT_DATA);
1832 BUG_ON(ni->name_len);
1833 /*
1834 * Reminder for later: If resident, the data is not
1835 * actually compressed. Only on the switch to non-
1836 * resident does compression kick in. This is in
1837 * contrast to encrypted files (see above).
1838 */
1839 ntfs_error(vi->i_sb, "Writing to compressed files is "
1840 "not implemented yet. Sorry.");
1841 return -EOPNOTSUPP;
1842 }
1843 }
1844 /*
1845 * If a previous ntfs_truncate() failed, repeat it and abort if it
1846 * fails again.
1847 */
1848 if (unlikely(NInoTruncateFailed(ni))) {
1849 down_write(&vi->i_alloc_sem);
1850 err = ntfs_truncate(vi);
1851 up_write(&vi->i_alloc_sem);
1852 if (err || NInoTruncateFailed(ni)) {
1853 if (!err)
1854 err = -EIO;
1855 ntfs_error(vol->sb, "Cannot perform write to inode "
1856 "0x%lx, attribute type 0x%x, because "
1857 "ntfs_truncate() failed (error code "
1858 "%i).", vi->i_ino,
1859 (unsigned)le32_to_cpu(ni->type), err);
1860 return err;
1861 }
1862 }
1863 /* The first byte after the write. */
1864 end = pos + count;
1865 /*
1866 * If the write goes beyond the allocated size, extend the allocation
1867 * to cover the whole of the write, rounded up to the nearest cluster.
1868 */
1869 read_lock_irqsave(&ni->size_lock, flags);
1870 ll = ni->allocated_size;
1871 read_unlock_irqrestore(&ni->size_lock, flags);
1872 if (end > ll) {
1873 /* Extend the allocation without changing the data size. */
1874 ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
1875 if (likely(ll >= 0)) {
1876 BUG_ON(pos >= ll);
1877 			/* If the extension was partial, truncate the write. */
1878 if (end > ll) {
1879 ntfs_debug("Truncating write to inode 0x%lx, "
1880 "attribute type 0x%x, because "
1881 "the allocation was only "
1882 "partially extended.",
1883 vi->i_ino, (unsigned)
1884 le32_to_cpu(ni->type));
1885 end = ll;
1886 count = ll - pos;
1887 }
1888 } else {
1889 err = ll;
1890 read_lock_irqsave(&ni->size_lock, flags);
1891 ll = ni->allocated_size;
1892 read_unlock_irqrestore(&ni->size_lock, flags);
1893 /* Perform a partial write if possible or fail. */
1894 if (pos < ll) {
1895 ntfs_debug("Truncating write to inode 0x%lx, "
1896 "attribute type 0x%x, because "
1897 "extending the allocation "
1898 "failed (error code %i).",
1899 vi->i_ino, (unsigned)
1900 le32_to_cpu(ni->type), err);
1901 end = ll;
1902 count = ll - pos;
1903 } else {
1904 ntfs_error(vol->sb, "Cannot perform write to "
1905 "inode 0x%lx, attribute type "
1906 "0x%x, because extending the "
1907 "allocation failed (error "
1908 "code %i).", vi->i_ino,
1909 (unsigned)
1910 le32_to_cpu(ni->type), err);
1911 return err;
1912 }
1913 }
1914 }
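	/*
	 * Worked example (sizes assumed for illustration): with 4096-byte
	 * clusters, a write of 5000 bytes at pos 10000 gives end 15000, so
	 * the allocation is asked to cover 15000 bytes, which
	 * ntfs_attr_extend_allocation() rounds up to the next cluster
	 * boundary, i.e. 16384 bytes.
	 */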
1915 pagevec_init(&lru_pvec, 0);
1916 written = 0;
1917 /*
1918 * If the write starts beyond the initialized size, extend it up to the
1919 * beginning of the write and initialize all non-sparse space between
1920 * the old initialized size and the new one. This automatically also
1921 * increments the vfs inode->i_size to keep it above or equal to the
1922 * initialized_size.
1923 */
1924 read_lock_irqsave(&ni->size_lock, flags);
1925 ll = ni->initialized_size;
1926 read_unlock_irqrestore(&ni->size_lock, flags);
1927 if (pos > ll) {
1928 err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
1929 &lru_pvec);
1930 if (err < 0) {
1931 ntfs_error(vol->sb, "Cannot perform write to inode "
1932 "0x%lx, attribute type 0x%x, because "
1933 "extending the initialized size "
1934 "failed (error code %i).", vi->i_ino,
1935 (unsigned)le32_to_cpu(ni->type), err);
1936 status = err;
1937 goto err_out;
1938 }
1939 }
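	/*
	 * Worked example (sizes assumed for illustration): if
	 * initialized_size is 8192 and the write starts at pos 20000,
	 * ntfs_attr_extend_initialized() zeroes the non-sparse byte range
	 * [8192, 20000) in the page cache and on disk and raises both
	 * initialized_size and i_size to 20000 before any new data is
	 * copied in.
	 */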
1940 /*
1941 * Determine the number of pages per cluster for non-resident
1942 * attributes.
1943 */
1944 nr_pages = 1;
1945 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
1946 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
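	/*
	 * E.g. (sizes assumed for illustration) with PAGE_CACHE_SIZE 4096
	 * and 64 KiB clusters, nr_pages = 65536 >> 12 = 16, so a write
	 * landing in a hole locks down all sixteen pages of the cluster
	 * in the loop below.
	 */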
1947 /* Finally, perform the actual write. */
1948 last_vcn = -1;
1949 if (likely(nr_segs == 1))
1950 buf = iov->iov_base;
1951 do {
1952 VCN vcn;
1953 pgoff_t idx, start_idx;
1954 unsigned ofs, do_pages, u;
1955 size_t copied;
1956
1957 start_idx = idx = pos >> PAGE_CACHE_SHIFT;
1958 ofs = pos & ~PAGE_CACHE_MASK;
1959 bytes = PAGE_CACHE_SIZE - ofs;
1960 do_pages = 1;
1961 if (nr_pages > 1) {
1962 vcn = pos >> vol->cluster_size_bits;
1963 if (vcn != last_vcn) {
1964 last_vcn = vcn;
1965 /*
1966 * Get the lcn of the vcn the write is in. If
1967 * it is a hole, need to lock down all pages in
1968 * the cluster.
1969 */
1970 down_read(&ni->runlist.lock);
1971 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
1972 vol->cluster_size_bits, false);
1973 up_read(&ni->runlist.lock);
1974 if (unlikely(lcn < LCN_HOLE)) {
1975 status = -EIO;
1976 if (lcn == LCN_ENOMEM)
1977 status = -ENOMEM;
1978 else
1979 ntfs_error(vol->sb, "Cannot "
1980 "perform write to "
1981 "inode 0x%lx, "
1982 "attribute type 0x%x, "
1983 "because the attribute "
1984 "is corrupt.",
1985 vi->i_ino, (unsigned)
1986 le32_to_cpu(ni->type));
1987 break;
1988 }
1989 if (lcn == LCN_HOLE) {
1990 start_idx = (pos & ~(s64)
1991 vol->cluster_size_mask)
1992 >> PAGE_CACHE_SHIFT;
1993 bytes = vol->cluster_size - (pos &
1994 vol->cluster_size_mask);
1995 do_pages = nr_pages;
1996 }
1997 }
1998 }
1999 if (bytes > count)
2000 bytes = count;
2001 /*
2002 * Bring in the user page(s) that we will copy from _first_.
2003 		 * Otherwise we hit a nasty deadlock copying from the same
2004 		 * page(s) as we are writing to while they are locked but
2005 		 * not yet uptodate. Note, at present there is nothing to
2006 		 * stop the pages from being swapped out between us bringing
2007 		 * them into memory and doing the actual copying.
2008 */
2009 if (likely(nr_segs == 1))
2010 ntfs_fault_in_pages_readable(buf, bytes);
2011 else
2012 ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
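		/*
		 * The overlap guarded against above can be produced from
		 * userspace by writing a file's own mapping back into the
		 * file (illustrative userspace sketch, names assumed):
		 *
		 *	fd = open("f", O_RDWR);
		 *	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		 *	write(fd, p, 4096);
		 *
		 * The source of the copy is then the very page cache page
		 * the write locks while it is not yet uptodate, so faulting
		 * it in during the copy would need the page lock we hold;
		 * hence the fault-in is done first, with nothing locked.
		 */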
2013 /* Get and lock @do_pages starting at index @start_idx. */
2014 status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
2015 pages, &cached_page, &lru_pvec);
2016 if (unlikely(status))
2017 break;
2018 /*
2019 * For non-resident attributes, we need to fill any holes with
2020 		 * actual clusters and ensure all buffers are mapped. We also
2021 * need to bring uptodate any buffers that are only partially
2022 * being written to.
2023 */
2024 if (NInoNonResident(ni)) {
2025 status = ntfs_prepare_pages_for_non_resident_write(
2026 pages, do_pages, pos, bytes);
2027 if (unlikely(status)) {
2028 loff_t i_size;
2029
2030 do {
2031 unlock_page(pages[--do_pages]);
2032 page_cache_release(pages[do_pages]);
2033 } while (do_pages);
2034 /*
2035 * The write preparation may have instantiated
2036 * allocated space outside i_size. Trim this
2037 * off again. We can ignore any errors in this
2038 				 * case as we will just be wasting a bit of
2039 * allocated space, which is not a disaster.
2040 */
2041 i_size = i_size_read(vi);
2042 if (pos + bytes > i_size)
2043 vmtruncate(vi, i_size);
2044 break;
2045 }
2046 }
2047 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
2048 if (likely(nr_segs == 1)) {
2049 copied = ntfs_copy_from_user(pages + u, do_pages - u,
2050 ofs, buf, bytes);
2051 buf += copied;
2052 } else
2053 copied = ntfs_copy_from_user_iovec(pages + u,
2054 do_pages - u, ofs, &iov, &iov_ofs,
2055 bytes);
2056 ntfs_flush_dcache_pages(pages + u, do_pages - u);
2057 status = ntfs_commit_pages_after_write(pages, do_pages, pos,
2058 bytes);
2059 if (likely(!status)) {
2060 written += copied;
2061 count -= copied;
2062 pos += copied;
2063 if (unlikely(copied != bytes))
2064 status = -EFAULT;
2065 }
2066 do {
2067 unlock_page(pages[--do_pages]);
2068 mark_page_accessed(pages[do_pages]);
2069 page_cache_release(pages[do_pages]);
2070 } while (do_pages);
2071 if (unlikely(status))
2072 break;
2073 balance_dirty_pages_ratelimited(mapping);
2074 cond_resched();
2075 } while (count);
2076 err_out:
2077 *ppos = pos;
2078 if (cached_page)
2079 page_cache_release(cached_page);
2080 pagevec_lru_add_file(&lru_pvec);
2081 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
2082 written ? "written" : "status", (unsigned long)written,
2083 (long)status);
2084 return written ? written : status;
2085 }
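/*
 * In outline, each iteration of the write loop above does (sketch):
 *
 *	fault in the user source page(s);
 *	grab and lock the target page cache pages;
 *	if non-resident: instantiate holes and map/bring uptodate buffers;
 *	copy the user data into the pages;
 *	commit: update sizes and mark the buffers/mft record dirty;
 *	unlock and release the pages;
 *
 * with a short copy turning into -EFAULT only after the commit, so that
 * whatever was copied is not lost.
 */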
2086
2087 /**
2088  * ntfs_file_aio_write_nolock - write data to a file; caller holds i_mutex
2089 */
2090 static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
2091 const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
2092 {
2093 struct file *file = iocb->ki_filp;
2094 struct address_space *mapping = file->f_mapping;
2095 struct inode *inode = mapping->host;
2096 loff_t pos;
2097 size_t count; /* after file limit checks */
2098 ssize_t written, err;
2099
2100 count = 0;
2101 err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
2102 if (err)
2103 return err;
2104 pos = *ppos;
2105 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2106 /* We can write back this queue in page reclaim. */
2107 current->backing_dev_info = mapping->backing_dev_info;
2108 written = 0;
2109 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2110 if (err)
2111 goto out;
2112 if (!count)
2113 goto out;
2114 err = file_remove_suid(file);
2115 if (err)
2116 goto out;
2117 file_update_time(file);
2118 written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
2119 count);
2120 out:
2121 current->backing_dev_info = NULL;
2122 return written ? written : err;
2123 }
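/*
 * Note on the generic helpers used above (behaviour as in this kernel's
 * mm/filemap.c): generic_write_checks() is what gives O_APPEND its
 * semantics here - it moves @pos to the current i_size for appending
 * writes - and clamps @count against the file size rlimit and
 * s_maxbytes, possibly to zero, which is why !count is only tested
 * after it.
 */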
2124
2125 /**
2126  * ntfs_file_aio_write - write data to an open file
2127 */
2128 static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2129 unsigned long nr_segs, loff_t pos)
2130 {
2131 struct file *file = iocb->ki_filp;
2132 struct address_space *mapping = file->f_mapping;
2133 struct inode *inode = mapping->host;
2134 ssize_t ret;
2135
2136 BUG_ON(iocb->ki_pos != pos);
2137
2138 mutex_lock(&inode->i_mutex);
2139 ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
2140 mutex_unlock(&inode->i_mutex);
2141 if (ret > 0) {
2142 int err = generic_write_sync(file, pos, ret);
2143 if (err < 0)
2144 ret = err;
2145 }
2146 return ret;
2147 }
2148
2149 /**
2150 * ntfs_file_fsync - sync a file to disk
2151 * @filp: file to be synced
2152 * @dentry: dentry describing the file to sync
2153 * @datasync: if non-zero only flush user data and not metadata
2154 *
2155 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
2156 * system calls. This function is inspired by fs/buffer.c::file_fsync().
2157 *
2158 * If @datasync is false, write the mft record and all associated extent mft
2159 * records as well as the $DATA attribute and then sync the block device.
2160 *
2161 * If @datasync is true and the attribute is non-resident, we skip the writing
2162 * of the mft record and all associated extent mft records (this might still
2163 * happen due to the write_inode_now() call).
2164 *
2165 * Also, if @datasync is true, we do not wait on the inode to be written out
2166 * but we always wait on the page cache pages to be written out.
2167 *
2168 * Note: In the past @filp could be NULL so we ignore it as we don't need it
2169 * anyway.
2170 *
2171 * Locking: Caller must hold i_mutex on the inode.
2172 *
2173 * TODO: We should probably also write all attribute/index inodes associated
2174 * with this inode but since we have no simple way of getting to them we ignore
2175 * this problem for now.
2176 */
2177 static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
2178 int datasync)
2179 {
2180 struct inode *vi = dentry->d_inode;
2181 int err, ret = 0;
2182
2183 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
2184 BUG_ON(S_ISDIR(vi->i_mode));
2185 if (!datasync || !NInoNonResident(NTFS_I(vi)))
2186 ret = __ntfs_write_inode(vi, 1);
2187 write_inode_now(vi, !datasync);
2188 /*
2189 * NOTE: If we were to use mapping->private_list (see ext2 and
2190 * fs/buffer.c) for dirty blocks then we could optimize the below to be
2191 * sync_mapping_buffers(vi->i_mapping).
2192 */
2193 err = sync_blockdev(vi->i_sb->s_bdev);
2194 if (unlikely(err && !ret))
2195 ret = err;
2196 if (likely(!ret))
2197 ntfs_debug("Done.");
2198 else
2199 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
2200 "%u.", datasync ? "data" : "", vi->i_ino, -ret);
2201 return ret;
2202 }
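/*
 * Usage from userspace (illustrative): fsync(fd) arrives here with
 * @datasync 0 and fdatasync(fd) with @datasync 1, so e.g.
 *
 *	ret = fdatasync(fd);
 *
 * on a file with a non-resident $DATA attribute skips the explicit mft
 * record write but still waits on the data pages, as described above.
 */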
2203
2204 #endif /* NTFS_RW */
2205
2206 const struct file_operations ntfs_file_ops = {
2207 .llseek = generic_file_llseek, /* Seek inside file. */
2208 .read = do_sync_read, /* Read from file. */
2209 .aio_read = generic_file_aio_read, /* Async read from file. */
2210 #ifdef NTFS_RW
2211 .write = do_sync_write, /* Write to file. */
2212 .aio_write = ntfs_file_aio_write, /* Async write to file. */
2213 /*.release = ,*/ /* Last file is closed. See
2214 fs/ext2/file.c::
2215 ext2_release_file() for
2216 how to use this to discard
2217 preallocated space for
2218 write opened files. */
2219 .fsync = ntfs_file_fsync, /* Sync a file to disk. */
2220 /*.aio_fsync = ,*/ /* Sync all outstanding async
2221 i/o operations on a
2222 kiocb. */
2223 #endif /* NTFS_RW */
2224 /*.ioctl = ,*/ /* Perform function on the
2225 mounted filesystem. */
2226 .mmap = generic_file_mmap, /* Mmap file. */
2227 .open = ntfs_file_open, /* Open file. */
2228 .splice_read = generic_file_splice_read /* Zero-copy data send with
2229 the data source being on
2230 the ntfs partition. We do
2231 not need to care about the
2232 data destination. */
2233 /*.sendpage = ,*/ /* Zero-copy data send with
2234 the data destination being
2235 on the ntfs partition. We
2236 do not need to care about
2237 the data source. */
2238 };
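/*
 * Dispatch sketch (VFS behaviour as in this kernel's fs/read_write.c): a
 * plain write(2) on an ntfs regular file goes
 *
 *	sys_write() -> vfs_write() -> do_sync_write()
 *		-> ntfs_file_aio_write()	(the .aio_write method above)
 *
 * i.e. do_sync_write() wraps the synchronous write in a kiocb and waits
 * for the ->aio_write method to complete it.
 */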
2239
2240 const struct inode_operations ntfs_file_inode_ops = {
2241 #ifdef NTFS_RW
2242 .truncate = ntfs_truncate_vfs,
2243 .setattr = ntfs_setattr,
2244 #endif /* NTFS_RW */
2245 };
2246
2247 const struct file_operations ntfs_empty_file_ops = {};
2248
2249 const struct inode_operations ntfs_empty_inode_ops = {};