/**
 * aops.c - NTFS kernel address space operations and page cache handling.
 * Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2005 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>

#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"

/**
 * ntfs_end_buffer_async_read - async io completion for reading attributes
 * @bh:		buffer head on which io is completed
 * @uptodate:	whether @bh is now uptodate or not
 *
 * Asynchronous I/O completion handler for reading pages belonging to the
 * attribute address space of an inode. The inodes can either be files or
 * directories or they can be fake inodes describing some attribute.
 *
 * If NInoMstProtected(), perform the post read mst fixups when all IO on the
 * page has been completed and mark the page uptodate or set the error bit on
 * the page. To determine the size of the records that need fixing up, we
 * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
 * record size, and index_block_size_bits, to the log(base 2) of the ntfs
 * record size.
 */
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first, *tmp;
	struct page *page;
	ntfs_inode *ni;
	int page_uptodate = 1;

	page = bh->b_page;
	ni = NTFS_I(page->mapping->host);

	if (likely(uptodate)) {
		s64 file_ofs, initialized_size;

		set_buffer_uptodate(bh);

		file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
				bh_offset(bh);
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->initialized_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		/* Check for the current buffer head overflowing. */
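		/*
		 * If the buffer extends past the initialized size, zero its
		 * tail so that stale on-disk data beyond the initialized
		 * region never becomes visible through the page cache.
		 */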
		if (file_ofs + bh->b_size > initialized_size) {
			char *addr;
			int ofs = 0;

			if (file_ofs < initialized_size)
				ofs = initialized_size - file_ofs;
			addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
			memset(addr + bh_offset(bh) + ofs, 0, bh->b_size - ofs);
			flush_dcache_page(page);
			kunmap_atomic(addr, KM_BIO_SRC_IRQ);
		}
	} else {
		clear_buffer_uptodate(bh);
		SetPageError(page);
		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block %llu.",
				(unsigned long long)bh->b_blocknr);
	}
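	/*
	 * All buffer heads of a page share the BH_Uptodate_Lock bit in the
	 * first buffer's b_state, so taking it (with interrupts disabled)
	 * serializes parallel I/O completions walking the buffer ring of
	 * this page below.
	 */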
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	/*
	 * If none of the buffers had errors then we can set the page uptodate,
	 * but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
	 * Note we ignore fixup errors as those are detected when
	 * map_mft_record() is called which gives us per record granularity
	 * rather than per page granularity.
	 */
	if (!NInoMstProtected(ni)) {
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	} else {
		char *addr;
		unsigned int i, recs;
		u32 rec_size;

		rec_size = ni->itype.index.block_size;
		recs = PAGE_CACHE_SIZE / rec_size;
		/* Should have been verified before we got here... */
		BUG_ON(!recs);
		addr = kmap_atomic(page, KM_BIO_SRC_IRQ);
		for (i = 0; i < recs; i++)
			post_read_mst_fixup((NTFS_RECORD*)(addr +
					i * rec_size), rec_size);
		flush_dcache_page(page);
		kunmap_atomic(addr, KM_BIO_SRC_IRQ);
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	}
	unlock_page(page);
	return;
still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:	page cache page to fill with data
 *
 * Fill the page @page of the address space belonging to the @page->host inode.
 * We read each buffer asynchronously and when all buffers are read in, our io
 * completion handler ntfs_end_buffer_async_read(), if required, automatically
 * applies the mst fixups to the page before finally marking it uptodate and
 * unlocking it.
 *
 * We only enforce allocated_size limit because i_size is checked for in
 * generic_file_read().
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_page().
 */
static int ntfs_read_block(struct page *page)
{
	VCN vcn;
	LCN lcn;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	sector_t iblock, lblock, zblock;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int i, nr;
	unsigned char blocksize_bits;

	ni = NTFS_I(page->mapping->host);
	vol = ni->vol;

	/* $MFT/$DATA must have its complete runlist in memory at all times. */
	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));

	blocksize_bits = VFS_I(ni)->i_blkbits;
	blocksize = 1 << blocksize_bits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize, 0);
		if (unlikely(!page_has_buffers(page))) {
			unlock_page(page);
			return -ENOMEM;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
	read_lock_irqsave(&ni->size_lock, flags);
	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
	zblock = (ni->initialized_size + blocksize - 1) >> blocksize_bits;
	read_unlock_irqrestore(&ni->size_lock, flags);
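	/*
	 * lblock is the first block outside the allocated size and zblock the
	 * first block outside the initialized size. Blocks in [zblock, lblock)
	 * are allocated but not initialized, so below they are zeroed rather
	 * than read from disk.
	 */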

	/* Loop through all the buffers in the page. */
	rl = NULL;
	nr = i = 0;
	do {
		u8 *kaddr;
		int err;

		if (unlikely(buffer_uptodate(bh)))
			continue;
		if (unlikely(buffer_mapped(bh))) {
			arr[nr++] = bh;
			continue;
		}
		err = 0;
		bh->b_bdev = vol->sb->s_bdev;
		/* Is the block within the allowed limits? */
		if (iblock < lblock) {
			BOOL is_retry = FALSE;

			/* Convert iblock into corresponding vcn and offset. */
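			/*
			 * The byte offset of the block within the attribute is
			 * iblock << blocksize_bits; dividing it by the cluster
			 * size yields the virtual cluster number (vcn) and the
			 * remainder is the byte offset within that cluster
			 * (vcn_ofs).
			 */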
			vcn = (VCN)iblock << blocksize_bits >>
					vol->cluster_size_bits;
			vcn_ofs = ((VCN)iblock << blocksize_bits) &
					vol->cluster_size_mask;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
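			/*
			 * ntfs_rl_vcn_to_lcn() returns the logical cluster
			 * number (>= 0) on success and a negative LCN_* code
			 * (LCN_HOLE, LCN_RL_NOT_MAPPED, LCN_ENOENT, ...) on
			 * failure, which is what the checks below dispatch on.
			 */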
			/* Successful remap. */
			if (lcn >= 0) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
						+ vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
				/* Only read initialized data blocks. */
				if (iblock < zblock) {
					arr[nr++] = bh;
					continue;
				}
				/* Fully non-initialized data block, zero it. */
				goto handle_zblock;
			}
			/* It is a hole, need to zero it. */
			if (lcn == LCN_HOLE)
				goto handle_hole;
			/* If first try and runlist unmapped, map and retry. */
			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
				is_retry = TRUE;
				/*
				 * Attempt to map runlist, dropping lock for
				 * the duration.
				 */
				up_read(&ni->runlist.lock);
				err = ntfs_map_runlist(ni, vcn);
				if (likely(!err))
					goto lock_retry_remap;
				rl = NULL;
			} else if (!rl)
				up_read(&ni->runlist.lock);
			/*
			 * If buffer is outside the runlist, treat it as a
			 * hole. This can happen due to concurrent truncate
			 * for example.
			 */
			if (err == -ENOENT || lcn == LCN_ENOENT) {
				err = 0;
				goto handle_hole;
			}
			/* Hard error, zero out region. */
			if (!err)
				err = -EIO;
			bh->b_blocknr = -1;
			SetPageError(page);
			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"offset 0x%x because its location on "
					"disk could not be determined%s "
					"(error code %i).", ni->mft_no,
					ni->type, (unsigned long long)vcn,
					vcn_ofs, is_retry ? " even after "
					"retrying" : "", err);
		}
		/*
		 * Either iblock was outside lblock limits or
		 * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion
		 * of the page and set the buffer uptodate.
		 */
handle_hole:
		bh->b_blocknr = -1UL;
		clear_buffer_mapped(bh);
handle_zblock:
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + i * blocksize, 0, blocksize);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		if (likely(!err))
			set_buffer_uptodate(bh);
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Check we have at least one buffer ready for i/o. */
	if (nr) {
		struct buffer_head *tbh;

		/* Lock the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			lock_buffer(tbh);
			tbh->b_end_io = ntfs_end_buffer_async_read;
			set_buffer_async_read(tbh);
		}
		/* Finally, start i/o on the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			if (likely(!buffer_uptodate(tbh)))
				submit_bh(READ, tbh);
			else
				ntfs_end_buffer_async_read(tbh, 1);
		}
		return 0;
	}
	/* No i/o was scheduled on any of the buffers. */
	if (likely(!PageError(page)))
		SetPageUptodate(page);
	else /* Signal synchronous i/o error. */
		nr = -EIO;
	unlock_page(page);
	return nr;
}

/**
 * ntfs_readpage - fill a @page of a @file with data from the device
 * @file:	open file to which the page @page belongs or NULL
 * @page:	page cache page to fill with data
 *
 * For non-resident attributes, ntfs_readpage() fills the @page of the open
 * file @file by calling the ntfs version of the generic block_read_full_page()
 * function, ntfs_read_block(), which in turn creates and reads in the buffers
 * associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
 * data from the mft record (which at this stage is most likely in memory) and
 * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
 * even if the mft record is not cached at this point in time, we need to wait
 * for it to be read in before we can do the copy.
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_readpage(struct file *file, struct page *page)
{
	ntfs_inode *ni, *base_ni;
	u8 *kaddr;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *mrec;
	unsigned long flags;
	u32 attr_len;
	int err = 0;

retry_readpage:
	BUG_ON(!PageLocked(page));
	/*
	 * This can potentially happen because we clear PageUptodate() during
	 * ntfs_writepage() of MstProtected() attributes.
	 */
	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}
	ni = NTFS_I(page->mapping->host);
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed. Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted. Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If attribute is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			BUG_ON(ni->type != AT_DATA);
			err = -EACCES;
			goto err_out;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			return ntfs_read_compressed_block(page);
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* Normal, non-resident data stream. */
		return ntfs_read_block(page);
	}
	/*
	 * Attribute is resident, implying it is not compressed or encrypted.
	 * This also means the attribute is smaller than an mft record and
	 * hence smaller than a page, so can simply zero out any pages with
	 * index above 0. Note the attribute can actually be marked compressed
	 * but if it is resident the actual data is not compressed so we are
	 * ok to ignore the compressed flag here.
	 */
	if (unlikely(page->index > 0)) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		goto done;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	mrec = map_mft_record(base_ni);
	if (IS_ERR(mrec)) {
		err = PTR_ERR(mrec);
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the readpage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_readpage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto put_unm_err_out;
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	read_lock_irqsave(&ni->size_lock, flags);
	if (unlikely(attr_len > ni->initialized_size))
		attr_len = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
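	/*
	 * Never copy more than the initialized size; anything past it must
	 * read as zeroes, which the memset() below provides.
	 */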
	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the data to the page. */
	memcpy(kaddr, (u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			attr_len);
	/* Zero the remainder of the page. */
	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
put_unm_err_out:
	ntfs_attr_put_search_ctx(ctx);
unm_err_out:
	unmap_mft_record(base_ni);
done:
	SetPageUptodate(page);
err_out:
	unlock_page(page);
	return err;
}

#ifdef NTFS_RW

/**
 * ntfs_write_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, non-mst
 * protected attributes to their backing store.
 *
 * For a page with buffers, map and write the dirty buffers asynchronously
 * under page writeback. For a page without buffers, create buffers for the
 * page, then proceed as above.
 *
 * If a page doesn't have buffers the page dirty state is definitive. If a page
 * does have buffers, the page dirty state is just a hint, and the buffer dirty
 * state is definitive. (A hint which has rules: dirty buffers against a clean
 * page is illegal. Other combinations are legal and need to be handled. In
 * particular a dirty page containing clean buffers for example.)
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_read_block() and __block_write_full_page().
 */
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
	VCN vcn;
	LCN lcn;
	s64 initialized_size;
	loff_t i_size;
	sector_t block, dblock, iblock;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int err;
	BOOL need_end_writeback;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", ni->mft_no, ni->type, page->index);

	BUG_ON(!NInoNonResident(ni));
	BUG_ON(NInoMstProtected(ni));

	blocksize_bits = vi->i_blkbits;
	blocksize = 1 << blocksize_bits;

	if (!page_has_buffers(page)) {
		BUG_ON(!PageUptodate(page));
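		/*
		 * The page was dirtied while it had no buffers, so per the
		 * rules above its dirty (and uptodate) state is definitive;
		 * create the buffers uptodate and dirty to match.
		 */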
		create_empty_buffers(page, blocksize,
				(1 << BH_Uptodate) | (1 << BH_Dirty));
		if (unlikely(!page_has_buffers(page))) {
			ntfs_warning(vol->sb, "Error allocating page "
					"buffers. Redirtying page so we try "
					"again later.");
			/*
			 * Put the page back on mapping->dirty_pages, but leave
			 * its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/* NOTE: Different naming scheme to ntfs_read_block()! */

	/* The first block in the page. */
	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);

	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(vi);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);

	/* The first out of bounds block for the data size. */
	dblock = (i_size + blocksize - 1) >> blocksize_bits;

	/* The last (fully or partially) initialized block. */
	iblock = initialized_size >> blocksize_bits;

	/*
	 * Be very careful. We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time. If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	/*
	 * Loop through all the buffers in the page, mapping all the dirty
	 * buffers to disk addresses and handling any aliases from the
	 * underlying block device's mapping.
	 */
	rl = NULL;
	err = 0;
	do {
		BOOL is_retry = FALSE;

		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress. The contents of such buffers
			 * were zeroed by ntfs_writepage().
			 *
			 * FIXME: What about the small race window where
			 * ntfs_writepage() has not done any clearing because
			 * the page was within i_size but before we get here,
			 * vmtruncate() modifies i_size?
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}

		/* Clean buffers are not written out, so no need to map them. */
		if (!buffer_dirty(bh))
			continue;

		/* Make sure we have enough initialized size. */
		if (unlikely((block >= iblock) &&
				(initialized_size < i_size))) {
			/*
			 * If this page is fully outside initialized size, zero
			 * out all pages between the current initialized size
			 * and the current page. Just use ntfs_readpage() to do
			 * the zeroing transparently.
			 */
			if (block > iblock) {
				// TODO:
				// For each page do:
				// - read_cache_page()
				// Again for each page do:
				// - wait_on_page_locked()
				// - Check (PageUptodate(page) &&
				//			!PageError(page))
				// Update initialized size in the attribute and
				// in the inode.
				// Again, for each page do:
				//	__set_page_dirty_buffers();
				// page_cache_release()
				// We don't need to wait on the writes.
				// Update iblock.
			}
			/*
			 * The current page straddles initialized size. Zero
			 * all non-uptodate buffers and set them uptodate (and
			 * dirty?). Note, there aren't any non-uptodate buffers
			 * if the page is uptodate.
			 * FIXME: For an uptodate page, the buffers may need to
			 * be written out because they were not initialized on
			 * disk before.
			 */
			if (!PageUptodate(page)) {
				// TODO:
				// Zero any non-uptodate buffers up to i_size.
				// Set them uptodate and dirty.
			}
			// TODO:
			// Update initialized size in the attribute and in the
			// inode (up to i_size).
			// Update iblock.
			// FIXME: This is inefficient. Try to batch the two
			// size changes to happen in one go.
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
			// Do NOT set_buffer_new() BUT DO clear buffer range
			// outside write request range.
			// set_buffer_uptodate() on complete buffers as well as
			// set_buffer_dirty().
		}

		/* No need to map buffers that are already mapped. */
		if (buffer_mapped(bh))
			continue;

		/* Unmapped, dirty buffer. Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;

		/* Convert block into corresponding vcn and offset. */
		vcn = (VCN)block << blocksize_bits;
		vcn_ofs = vcn & vol->cluster_size_mask;
		vcn >>= vol->cluster_size_bits;
		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/* Successful remap. */
		if (lcn >= 0) {
			/* Setup buffer head to point to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
					vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);
			continue;
		}
		/* It is a hole, need to instantiate it. */
		if (lcn == LCN_HOLE) {
			u8 *kaddr;
			unsigned long *bpos, *bend;

			/* Check if the buffer is zero. */
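			/*
			 * Scan the buffer one machine word at a time; a single
			 * non-zero word means the buffer contains data and
			 * cannot be left as a sparse hole.
			 */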
			kaddr = kmap_atomic(page, KM_USER0);
			bpos = (unsigned long *)(kaddr + bh_offset(bh));
			bend = (unsigned long *)((u8*)bpos + blocksize);
			do {
				if (unlikely(*bpos))
					break;
			} while (likely(++bpos < bend));
			kunmap_atomic(kaddr, KM_USER0);
			if (bpos == bend) {
				/*
				 * Buffer is zero and sparse, no need to write
				 * it.
				 */
				bh->b_blocknr = -1;
				clear_buffer_dirty(bh);
				continue;
			}
			// TODO: Instantiate the hole.
			// clear_buffer_new(bh);
			// unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			ntfs_error(vol->sb, "Writing into sparse regions is "
					"not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}
		/* If first try and runlist unmapped, map and retry. */
		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
			is_retry = TRUE;
			/*
			 * Attempt to map runlist, dropping lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (likely(!err))
				goto lock_retry_remap;
			rl = NULL;
		} else if (!rl)
			up_read(&ni->runlist.lock);
		/*
		 * If buffer is outside the runlist, truncate has cut it out
		 * of the runlist. Just clean and clear the buffer and set it
		 * uptodate so it can get discarded by the VM.
		 */
		if (err == -ENOENT || lcn == LCN_ENOENT) {
			u8 *kaddr;

			bh->b_blocknr = -1;
			clear_buffer_dirty(bh);
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + bh_offset(bh), 0, blocksize);
			kunmap_atomic(kaddr, KM_USER0);
			flush_dcache_page(page);
			set_buffer_uptodate(bh);
			err = 0;
			continue;
		}
		/* Failed to map the buffer, even after retrying. */
		if (!err)
			err = -EIO;
		bh->b_blocknr = -1;
		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
				"because its location on disk could not be "
				"determined%s (error code %i).", ni->mft_no,
				ni->type, (unsigned long long)vcn,
				vcn_ofs, is_retry ? " even after "
				"retrying" : "", err);
		break;
	} while (block++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* For the error case, need to reset bh to the beginning. */
	bh = head;

	/* Just an optimization, so ->readpage() is not called later. */
	if (unlikely(!PageUptodate(page))) {
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				bh = head;
				break;
			}
		} while ((bh = bh->b_this_page) != head);
		if (uptodate)
			SetPageUptodate(page);
	}

	/* Setup all mapped, dirty buffers for async write i/o. */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				BUG_ON(!buffer_uptodate(bh));
				mark_buffer_async_write(bh);
			} else
				unlock_buffer(bh);
		} else if (unlikely(err)) {
			/*
			 * For the error case. The buffer may have been set
			 * dirty during attachment to a dirty page.
			 */
			if (err != -ENOMEM)
				clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	if (unlikely(err)) {
		// TODO: Remove the -EOPNOTSUPP check later on...
		if (unlikely(err == -EOPNOTSUPP))
			err = 0;
		else if (err == -ENOMEM) {
			ntfs_warning(vol->sb, "Error allocating memory. "
					"Redirtying page so we try again "
					"later.");
			/*
			 * Put the page back on mapping->dirty_pages, but
			 * leave its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			err = 0;
		} else
			SetPageError(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */

	/* Submit the prepared buffers for i/o. */
	need_end_writeback = TRUE;
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			need_end_writeback = FALSE;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	/* If no i/o was started, need to end_page_writeback(). */
	if (unlikely(need_end_writeback))
		end_page_writeback(page);

	ntfs_debug("Done.");
	return err;
}

/**
 * ntfs_write_mst_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, mst protected
 * attributes to their backing store. The only supported attributes are index
 * allocation and $MFT/$DATA. Both directory inodes and index inodes are
 * supported for the index allocation case.
 *
 * The page must remain locked for the duration of the write because we apply
 * the mst fixups, write, and then undo the fixups, so if we were to unlock the
 * page before undoing the fixups, any other user of the page will see the
 * page contents as corrupt.
 *
 * We clear the page uptodate flag for the duration of the function to ensure
 * exclusion for the $MFT/$DATA case against someone mapping an mft record we
 * are about to apply the mst fixups to.
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_write_block(), ntfs_mft_writepage(), and
 * write_mft_record_nolock().
 */
static int ntfs_write_mst_block(struct page *page,
		struct writeback_control *wbc)
{
	sector_t block, dblock, rec_block;
	struct inode *vi = page->mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	u8 *kaddr;
	unsigned int rec_size = ni->itype.index.block_size;
	ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
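	/*
	 * For mst protected attributes, ni->itype.index.block_size holds the
	 * size of one ntfs record: the index block size for index allocation
	 * attributes and, via the trick described above
	 * ntfs_end_buffer_async_read(), the mft record size for $MFT/$DATA.
	 */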
	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	runlist_element *rl;
	int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
	unsigned bh_size, rec_size_bits;
	BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
	unsigned char bh_size_bits;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", vi->i_ino, ni->type, page->index);
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(!NInoMstProtected(ni));
	is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
	/*
	 * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
	 * in its page cache were to be marked dirty. However this should
	 * never happen with the current driver and considering we do not
	 * handle this case here we do want to BUG(), at least for now.
	 */
	BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
	bh_size_bits = vi->i_blkbits;
	bh_size = 1 << bh_size_bits;
	max_bhs = PAGE_CACHE_SIZE / bh_size;
	BUG_ON(!max_bhs);
	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);

	/* Were we called for sync purposes? */
	sync = (wbc->sync_mode == WB_SYNC_ALL);

	/* Make sure we have mapped buffers. */
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	rec_size_bits = ni->itype.index.block_size_bits;
	BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
	bhs_per_rec = rec_size >> bh_size_bits;
	BUG_ON(!bhs_per_rec);

	/* The first block in the page. */
	rec_block = block = (sector_t)page->index <<
			(PAGE_CACHE_SHIFT - bh_size_bits);

	/* The first out of bounds block for the data size. */
	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;

	rl = NULL;
	err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
	page_is_dirty = rec_is_dirty = FALSE;
	rec_start_bh = NULL;
	do {
		BOOL is_retry = FALSE;

		if (likely(block < rec_block)) {
			if (unlikely(block >= dblock)) {
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * This block is not the first one in the record. We
			 * ignore the buffer's dirty state because we could
			 * have raced with a parallel mark_ntfs_record_dirty().
			 */
			if (!rec_is_dirty)
				continue;
			if (unlikely(err2)) {
				if (err2 != -ENOMEM)
					clear_buffer_dirty(bh);
				continue;
			}
		} else /* if (block == rec_block) */ {
			BUG_ON(block > rec_block);
			/* This block is the first one in the record. */
			rec_block += bhs_per_rec;
			err2 = 0;
			if (unlikely(block >= dblock)) {
				clear_buffer_dirty(bh);
				continue;
			}
			if (!buffer_dirty(bh)) {
				/* Clean records are not written out. */
				rec_is_dirty = FALSE;
				continue;
			}
			rec_is_dirty = TRUE;
			rec_start_bh = bh;
		}
		/* Need to map the buffer if it is not mapped already. */
		if (unlikely(!buffer_mapped(bh))) {
			VCN vcn;
			LCN lcn;
			unsigned int vcn_ofs;

			bh->b_bdev = vol->sb->s_bdev;
			/* Obtain the vcn and offset of the current block. */
			vcn = (VCN)block << bh_size_bits;
			vcn_ofs = vcn & vol->cluster_size_mask;
			vcn >>= vol->cluster_size_bits;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (likely(lcn >= 0)) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn <<
						vol->cluster_size_bits) +
						vcn_ofs) >> bh_size_bits;
				set_buffer_mapped(bh);
			} else {
				/*
				 * Remap failed. Retry to map the runlist once
				 * unless we are working on $MFT which always
				 * has the whole of its runlist in memory.
				 */
				if (!is_mft && !is_retry &&
						lcn == LCN_RL_NOT_MAPPED) {
					is_retry = TRUE;
					/*
					 * Attempt to map runlist, dropping
					 * lock for the duration.
					 */
					up_read(&ni->runlist.lock);
					err2 = ntfs_map_runlist(ni, vcn);
					if (likely(!err2))
						goto lock_retry_remap;
					if (err2 == -ENOMEM)
						page_is_dirty = TRUE;
					lcn = err2;
				} else {
					err2 = -EIO;
					if (!rl)
						up_read(&ni->runlist.lock);
				}
				/* Hard error. Abort writing this record. */
				if (!err || err == -ENOMEM)
					err = err2;
				bh->b_blocknr = -1;
				ntfs_error(vol->sb, "Cannot write ntfs record "
						"0x%llx (inode 0x%lx, "
						"attribute type 0x%x) because "
						"its location on disk could "
						"not be determined (error "
						"code %lli).",
						(long long)block <<
						bh_size_bits >>
						vol->mft_record_size_bits,
						ni->mft_no, ni->type,
						(long long)lcn);
				/*
				 * If this is not the first buffer, remove the
				 * buffers in this record from the list of
				 * buffers to write and clear their dirty bit
				 * if not error -ENOMEM.
				 */
				if (rec_start_bh != bh) {
					while (bhs[--nr_bhs] != rec_start_bh)
						;
					if (err2 != -ENOMEM) {
						do {
							clear_buffer_dirty(
								rec_start_bh);
						} while ((rec_start_bh =
								rec_start_bh->
								b_this_page) !=
								bh);
					}
				}
				continue;
			}
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
	} while (block++, (bh = bh->b_this_page) != head);
	if (unlikely(rl))
		up_read(&ni->runlist.lock);
	/* If there were no dirty buffers, we are done. */
	if (!nr_bhs)
		goto done;
	/* Map the page so we can access its contents. */
	kaddr = kmap(page);
	/* Clear the page uptodate flag whilst the mst fixups are applied. */
	BUG_ON(!PageUptodate(page));
	ClearPageUptodate(page);
	for (i = 0; i < nr_bhs; i++) {
		unsigned int ofs;

		/* Skip buffers which are not at the beginning of records. */
		if (i % bhs_per_rec)
			continue;
		tbh = bhs[i];
		ofs = bh_offset(tbh);
		if (is_mft) {
			ntfs_inode *tni;
			unsigned long mft_no;

			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
					>> rec_size_bits;
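			/*
			 * (The byte offset of the record within the attribute,
			 * i.e. page offset plus offset within the page,
			 * divided by the record size, is the record number.)
			 */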
			/* Check whether to write this mft record. */
			tni = NULL;
			if (!ntfs_may_write_mft_record(vol, mft_no,
					(MFT_RECORD*)(kaddr + ofs), &tni)) {
				/*
				 * The record should not be written. This
				 * means we need to redirty the page before
				 * returning.
				 */
				page_is_dirty = TRUE;
				/*
				 * Remove the buffers in this mft record from
				 * the list of buffers to write.
				 */
				do {
					bhs[i] = NULL;
				} while (++i % bhs_per_rec);
				continue;
			}
			/*
			 * The record should be written. If a locked ntfs
			 * inode was returned, add it to the array of locked
			 * ntfs inodes.
			 */
			if (tni)
				locked_nis[nr_locked_nis++] = tni;
		}
		/* Apply the mst protection fixups. */
		err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
				rec_size);
		if (unlikely(err2)) {
			if (!err || err == -ENOMEM)
				err = -EIO;
			ntfs_error(vol->sb, "Failed to apply mst fixups "
					"(inode 0x%lx, attribute type 0x%x, "
					"page index 0x%lx, page offset 0x%x)!"
					" Unmount and run chkdsk.", vi->i_ino,
					ni->type, page->index, ofs);
			/*
			 * Mark all the buffers in this record clean as we do
			 * not want to write corrupt data to disk.
			 */
			do {
				clear_buffer_dirty(bhs[i]);
				bhs[i] = NULL;
			} while (++i % bhs_per_rec);
			continue;
		}
		nr_recs++;
	}
	/* If no records are to be written out, we are done. */
	if (!nr_recs)
		goto unm_done;
	flush_dcache_page(page);
	/* Lock buffers and start synchronous write i/o on them. */
	for (i = 0; i < nr_bhs; i++) {
		tbh = bhs[i];
		if (!tbh)
			continue;
		if (unlikely(test_set_buffer_locked(tbh)))
			BUG();
		/* The buffer dirty state is now irrelevant, just clean it. */
		clear_buffer_dirty(tbh);
		BUG_ON(!buffer_uptodate(tbh));
		BUG_ON(!buffer_mapped(tbh));
		get_bh(tbh);
		tbh->b_end_io = end_buffer_write_sync;
		submit_bh(WRITE, tbh);
	}
	/* Synchronize the mft mirror now if not @sync. */
	if (is_mft && !sync)
		goto do_mirror;
do_wait:
	/* Wait on i/o completion of buffers. */
	for (i = 0; i < nr_bhs; i++) {
		tbh = bhs[i];
		if (!tbh)
			continue;
		wait_on_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_error(vol->sb, "I/O error while writing ntfs "
					"record buffer (inode 0x%lx, "
					"attribute type 0x%x, page index "
					"0x%lx, page offset 0x%lx)! Unmount "
					"and run chkdsk.", vi->i_ino, ni->type,
					page->index, bh_offset(tbh));
			if (!err || err == -ENOMEM)
				err = -EIO;
			/*
			 * Set the buffer uptodate so the page and buffer
			 * states do not become out of sync.
			 */
			set_buffer_uptodate(tbh);
		}
	}
	/* If @sync, now synchronize the mft mirror. */
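	/*
	 * The first vol->mftmirr_size mft records are duplicated in the mft
	 * mirror ($MFTMirr), so any of them that were just written must be
	 * copied there as well.
	 */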
	if (is_mft && sync) {
do_mirror:
		for (i = 0; i < nr_bhs; i++) {
			unsigned long mft_no;
			unsigned int ofs;

			/*
			 * Skip buffers which are not at the beginning of
			 * records.
			 */
			if (i % bhs_per_rec)
				continue;
			tbh = bhs[i];
			/* Skip removed buffers (and hence records). */
			if (!tbh)
				continue;
			ofs = bh_offset(tbh);
			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
					>> rec_size_bits;
			if (mft_no < vol->mftmirr_size)
				ntfs_sync_mft_mirror(vol, mft_no,
						(MFT_RECORD*)(kaddr + ofs),
						sync);
		}
		if (!sync)
			goto do_wait;
	}
	/* Remove the mst protection fixups again. */
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec)) {
			tbh = bhs[i];
			if (!tbh)
				continue;
			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(tbh)));
		}
	}
	flush_dcache_page(page);
unm_done:
	/* Unlock any locked inodes. */
	while (nr_locked_nis-- > 0) {
		ntfs_inode *tni, *base_tni;

		tni = locked_nis[nr_locked_nis];
		/* Get the base inode. */
		down(&tni->extent_lock);
		if (tni->nr_extents >= 0)
			base_tni = tni;
		else {
			base_tni = tni->ext.base_ntfs_ino;
			BUG_ON(!base_tni);
		}
		up(&tni->extent_lock);
		ntfs_debug("Unlocking %s inode 0x%lx.",
				tni == base_tni ? "base" : "extent",
				tni->mft_no);
		up(&tni->mrec_lock);
		atomic_dec(&tni->count);
		iput(VFS_I(base_tni));
	}
	SetPageUptodate(page);
	kunmap(page);
done:
	if (unlikely(err && err != -ENOMEM)) {
		/*
		 * Set page error if there is only one ntfs record in the page.
		 * Otherwise we would lose per-record granularity.
		 */
		if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
			SetPageError(page);
		NVolSetErrors(vol);
	}
	if (page_is_dirty) {
		ntfs_debug("Page still contains one or more dirty ntfs "
				"records. Redirtying the page starting at "
				"record 0x%lx.", page->index <<
				(PAGE_CACHE_SHIFT - rec_size_bits));
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	} else {
		/*
		 * Keep the VM happy. This must be done otherwise the
		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
		 * the page is clean.
		 */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
	}
	if (likely(!err))
		ntfs_debug("Done.");
	return err;
}

/**
 * ntfs_writepage - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty ntfs page cache
 * page cleaned. The VM has already locked the page and marked it clean.
 *
 * For non-resident attributes, ntfs_writepage() writes the @page by calling
 * the ntfs version of the generic block_write_full_page() function,
 * ntfs_write_block(), which in turn if necessary creates and writes the
 * buffers associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
 * the data to the mft record (which at this stage is most likely in memory).
 * The mft record is then marked dirty and written out asynchronously via the
 * vfs inode dirty code path for the inode the mft record belongs to or via the
 * vm page dirty code path for the page the mft record is in.
 *
 * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
	loff_t i_size;
	struct inode *vi = page->mapping->host;
	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
	char *kaddr;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	u32 attr_len;
	int err;

retry_writepage:
	BUG_ON(!PageLocked(page));
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT)) {
		/*
		 * The page may have dirty, unmapped buffers. Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidatepage(page, 0);
		unlock_page(page);
		ntfs_debug("Write outside i_size - truncated?");
		return 0;
	}
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed. Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted. Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			unlock_page(page);
			BUG_ON(ni->type != AT_DATA);
			ntfs_debug("Denying write access to encrypted "
					"file.");
			return -EACCES;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			// TODO: Implement and replace this with
			// return ntfs_write_compressed_block(page);
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
		// TODO: Implement and remove this check.
		if (NInoNonResident(ni) && NInoSparse(ni)) {
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to sparse files is not "
					"supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* We have to zero every time due to mmap-at-end-of-file. */
		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
			/* The page straddles i_size. */
			unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		/* Handle mst protected attributes. */
		if (NInoMstProtected(ni))
			return ntfs_write_mst_block(page, wbc);
		/* Normal, non-resident data stream. */
		return ntfs_write_block(page, wbc);
	}
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * mst protected. This also means the attribute is smaller than an mft
	 * record and hence smaller than a page, so can simply return error on
	 * any pages with index above 0. Note the attribute can actually be
	 * marked compressed but if it is resident the actual data is not
	 * compressed so we are ok to ignore the compressed flag here.
	 */
	BUG_ON(page_has_buffers(page));
	BUG_ON(!PageUptodate(page));
	if (unlikely(page->index > 0)) {
		ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
				"Aborting write.", page->index);
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		return -EIO;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the writepage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_writepage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto err_out;
	/*
	 * Keep the VM happy. This must be done otherwise the radix-tree tag
	 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	/*
	 * Here, we do not need to zero the out of bounds area every time
	 * because the below memcpy() already takes care of the
	 * mmap-at-end-of-file requirements. If the file is converted to a
	 * non-resident one, then the code path in use is switched to the
	 * non-resident one where the zeroing happens on each ntfs_writepage()
	 * invocation.
	 */
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	i_size = i_size_read(vi);
	if (unlikely(attr_len > i_size)) {
		attr_len = i_size;
		ctx->attr->data.resident.value_length = cpu_to_le32(attr_len);
	}
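	/*
	 * (If attr_len exceeds i_size, the file is presumably being truncated
	 * concurrently; copy only i_size bytes and shrink the stored value
	 * length to match.)
	 */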
	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the data from the page to the mft record. */
	memcpy((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			kaddr, attr_len);
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	/* Zero out of bounds area in the page cache page. */
	memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	end_page_writeback(page);

	/* Mark the mft record dirty, so it gets written back. */
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
				"page so we try again later.");
		/*
		 * Put the page back on mapping->dirty_pages, but leave its
		 * buffers' dirty state as-is.
		 */
		redirty_page_for_writepage(wbc, page);
		err = 0;
	} else {
		ntfs_error(vi->i_sb, "Resident attribute write failed with "
				"error %i.", err);
		SetPageError(page);
		NVolSetErrors(ni->vol);
		make_bad_inode(vi);
	}
	unlock_page(page);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

/**
 * ntfs_prepare_nonresident_write - prepare a page for a non-resident write
 * @page:	page cache page being prepared for writing to
 * @from:	offset within the page at which the write will start
 * @to:		offset within the page at which the write will end
 */
1511static int ntfs_prepare_nonresident_write(struct page *page,
1512 unsigned from, unsigned to)
1513{
1514 VCN vcn;
1515 LCN lcn;
07a4e2da
AA
1516 s64 initialized_size;
1517 loff_t i_size;
1da177e4
LT
1518 sector_t block, ablock, iblock;
1519 struct inode *vi;
1520 ntfs_inode *ni;
1521 ntfs_volume *vol;
1522 runlist_element *rl;
1523 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
07a4e2da 1524 unsigned long flags;
1da177e4
LT
1525 unsigned int vcn_ofs, block_start, block_end, blocksize;
1526 int err;
1527 BOOL is_retry;
1528 unsigned char blocksize_bits;
1529
1530 vi = page->mapping->host;
1531 ni = NTFS_I(vi);
1532 vol = ni->vol;
1533
1534 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
1535 "0x%lx, from = %u, to = %u.", ni->mft_no, ni->type,
1536 page->index, from, to);
1537
1538 BUG_ON(!NInoNonResident(ni));
1539
1540 blocksize_bits = vi->i_blkbits;
1541 blocksize = 1 << blocksize_bits;
1542
1543 /*
1544 * create_empty_buffers() will create uptodate/dirty buffers if the
1545 * page is uptodate/dirty.
1546 */
1547 if (!page_has_buffers(page))
1548 create_empty_buffers(page, blocksize, 0);
1549 bh = head = page_buffers(page);
1550 if (unlikely(!bh))
1551 return -ENOMEM;
1552
1553 /* The first block in the page. */
1554 block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
1555
07a4e2da 1556 read_lock_irqsave(&ni->size_lock, flags);
1da177e4 1557 /*
b6ad6c52 1558 * The first out of bounds block for the allocated size. No need to
1da177e4
LT
1559 * round up as allocated_size is in multiples of cluster size and the
1560 * minimum cluster size is 512 bytes, which is equal to the smallest
1561 * blocksize.
1562 */
1563 ablock = ni->allocated_size >> blocksize_bits;
07a4e2da
AA
1564 i_size = i_size_read(vi);
1565 initialized_size = ni->initialized_size;
1566 read_unlock_irqrestore(&ni->size_lock, flags);
1567
1da177e4 1568 /* The last (fully or partially) initialized block. */
07a4e2da 1569 iblock = initialized_size >> blocksize_bits;
1da177e4
LT
1570
1571 /* Loop through all the buffers in the page. */
1572 block_start = 0;
1573 rl = NULL;
1574 err = 0;
1575 do {
1576 block_end = block_start + blocksize;
1577 /*
1578 * If buffer @bh is outside the write, just mark it uptodate
1579 * if the page is uptodate and continue with the next buffer.
1580 */
1581 if (block_end <= from || block_start >= to) {
1582 if (PageUptodate(page)) {
1583 if (!buffer_uptodate(bh))
1584 set_buffer_uptodate(bh);
1585 }
1586 continue;
1587 }
1588 /*
1589 * @bh is at least partially being written to.
1590 * Make sure it is not marked as new.
1591 */
1592 //if (buffer_new(bh))
1593 // clear_buffer_new(bh);
1594
1595 if (block >= ablock) {
1596 // TODO: block is above allocated_size, need to
1597 // allocate it. Best done in one go to accommodate not
1598 // only block but all above blocks up to and including:
1599 // ((page->index << PAGE_CACHE_SHIFT) + to + blocksize
1600 // - 1) >> blobksize_bits. Obviously will need to round
1601 // up to next cluster boundary, too. This should be
1602 // done with a helper function, so it can be reused.
1603 ntfs_error(vol->sb, "Writing beyond allocated size "
1604 "is not supported yet. Sorry.");
1605 err = -EOPNOTSUPP;
1606 goto err_out;
1607 // Need to update ablock.
1608 // Need to set_buffer_new() on all block bhs that are
1609 // newly allocated.
1610 }
1611 /*
1612 * Now we have enough allocated size to fulfill the whole
1613 * request, i.e. block < ablock is true.
1614 */
1615 if (unlikely((block >= iblock) &&
07a4e2da 1616 (initialized_size < i_size))) {
1da177e4
LT
1617 /*
1618 * If this page is fully outside initialized size, zero
1619 * out all pages between the current initialized size
1620 * and the current page. Just use ntfs_readpage() to do
1621 * the zeroing transparently.
1622 */
1623 if (block > iblock) {
1624 // TODO:
1625 // For each page do:
1626 // - read_cache_page()
1627 // Again for each page do:
1628 // - wait_on_page_locked()
1629 // - Check (PageUptodate(page) &&
1630 // !PageError(page))
1631 // Update initialized size in the attribute and
1632 // in the inode.
1633 // Again, for each page do:
1634 // __set_page_dirty_buffers();
1635 // page_cache_release()
1636 // We don't need to wait on the writes.
1637 // Update iblock.
1638 }
1639 /*
1640 * The current page straddles initialized size. Zero
1641 * all non-uptodate buffers and set them uptodate (and
1642 * dirty?). Note, there aren't any non-uptodate buffers
1643 * if the page is uptodate.
1644 * FIXME: For an uptodate page, the buffers may need to
1645 * be written out because they were not initialized on
1646 * disk before.
1647 */
1648 if (!PageUptodate(page)) {
1649 // TODO:
1650 // Zero any non-uptodate buffers up to i_size.
1651 // Set them uptodate and dirty.
1652 }
1653 // TODO:
1654 // Update initialized size in the attribute and in the
1655 // inode (up to i_size).
1656 // Update iblock.
1657 // FIXME: This is inefficient. Try to batch the two
1658 // size changes to happen in one go.
1659 ntfs_error(vol->sb, "Writing beyond initialized size "
1660 "is not supported yet. Sorry.");
1661 err = -EOPNOTSUPP;
1662 goto err_out;
1663 // Do NOT set_buffer_new() BUT DO clear buffer range
1664 // outside write request range.
1665 // set_buffer_uptodate() on complete buffers as well as
1666 // set_buffer_dirty().
1667 }
1668
1669 /* Need to map unmapped buffers. */
1670 if (!buffer_mapped(bh)) {
1671 /* Unmapped buffer. Need to map it. */
1672 bh->b_bdev = vol->sb->s_bdev;
1673
1674 /* Convert block into corresponding vcn and offset. */
1675 vcn = (VCN)block << blocksize_bits >>
1676 vol->cluster_size_bits;
1677 vcn_ofs = ((VCN)block << blocksize_bits) &
1678 vol->cluster_size_mask;
1679
1680 is_retry = FALSE;
1681 if (!rl) {
1682lock_retry_remap:
1683 down_read(&ni->runlist.lock);
1684 rl = ni->runlist.rl;
1685 }
1686 if (likely(rl != NULL)) {
1687 /* Seek to element containing target vcn. */
1688 while (rl->length && rl[1].vcn <= vcn)
1689 rl++;
1690 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
1691 } else
1692 lcn = LCN_RL_NOT_MAPPED;
1693 if (unlikely(lcn < 0)) {
1694 /*
1695 * We extended the attribute allocation above.
1696 * If we hit an ENOENT here it means that the
1697 * allocation was insufficient which is a bug.
1698 */
1699 BUG_ON(lcn == LCN_ENOENT);
1700
1701 /* It is a hole, need to instantiate it. */
1702 if (lcn == LCN_HOLE) {
1703 // TODO: Instantiate the hole.
1704 // clear_buffer_new(bh);
1705 // unmap_underlying_metadata(bh->b_bdev,
1706 // bh->b_blocknr);
1707 // For non-uptodate buffers, need to
1708 // zero out the region outside the
1709 // request in this bh or all bhs,
1710 // depending on what we implemented
1711 // above.
1712 // Need to flush_dcache_page().
1713 // Or could use set_buffer_new()
1714 // instead?
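					/*
					 * One plausible shape for the hole
					 * instantiation (a sketch only, not
					 * implemented here): allocate the
					 * cluster(s) backing @vcn, e.g. with
					 * ntfs_cluster_alloc() from
					 * lcnalloc.c, merge the returned run
					 * into ni->runlist and update the
					 * mapping pairs array in the
					 * attribute record, then:
					 *
					 *	bh->b_blocknr = ((new_lcn <<
					 *		vol->cluster_size_bits)
					 *		+ vcn_ofs) >>
					 *		blocksize_bits;
					 *	set_buffer_new(bh);
					 *
					 * where "new_lcn" is the first newly
					 * allocated cluster, so that the
					 * buffer_new() code further down
					 * zeroes the region outside the
					 * write request.
					 */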
1715 ntfs_error(vol->sb, "Writing into "
1716 "sparse regions is "
1717 "not supported yet. "
1718 "Sorry.");
1719 err = -EOPNOTSUPP;
1720 if (!rl)
1721 up_read(&ni->runlist.lock);
1722 goto err_out;
1723 } else if (!is_retry &&
1724 lcn == LCN_RL_NOT_MAPPED) {
1725 is_retry = TRUE;
1726 /*
1727 * Attempt to map runlist, dropping
1728 * lock for the duration.
1729 */
1730 up_read(&ni->runlist.lock);
1731 err = ntfs_map_runlist(ni, vcn);
1732 if (likely(!err))
1733 goto lock_retry_remap;
1734 rl = NULL;
1735 } else if (!rl)
1736 up_read(&ni->runlist.lock);
1737 /*
1738 * Failed to map the buffer, even after
1739 * retrying.
1740 */
1741 if (!err)
1742 err = -EIO;
1743 bh->b_blocknr = -1;
1744 ntfs_error(vol->sb, "Failed to write to inode "
1745 "0x%lx, attribute type 0x%x, "
1746 "vcn 0x%llx, offset 0x%x "
1747 "because its location on disk "
1748 "could not be determined%s "
1749 "(error code %i).",
1750 ni->mft_no, ni->type,
1751 (unsigned long long)vcn,
1752 vcn_ofs, is_retry ? " even "
1753 "after retrying" : "", err);
1754 goto err_out;
1755 }
1756 /* We now have a successful remap, i.e. lcn >= 0. */
1757
1758 /* Setup buffer head to correct block. */
1759 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
1760 + vcn_ofs) >> blocksize_bits;
1761 set_buffer_mapped(bh);
1762
1763 // FIXME: Something analogous to this is needed for
1764 // each newly allocated block, i.e. BH_New.
1765 // FIXME: Might need to take this out of the
1766 // if (!buffer_mapped(bh)) {}, depending on how we
1767 // implement things during the allocated_size and
1768 // initialized_size extension code above.
1769 if (buffer_new(bh)) {
1770 clear_buffer_new(bh);
1771 unmap_underlying_metadata(bh->b_bdev,
1772 bh->b_blocknr);
1773 if (PageUptodate(page)) {
1774 set_buffer_uptodate(bh);
1775 continue;
1776 }
1777 /*
1778 * Page is _not_ uptodate, zero surrounding
1779 * region. NOTE: This is how we decide whether to
1780 * zero or not!
1781 */
1782 if (block_end > to || block_start < from) {
1783 void *kaddr;
1784
1785 kaddr = kmap_atomic(page, KM_USER0);
1786 if (block_end > to)
1787 memset(kaddr + to, 0,
1788 block_end - to);
1789 if (block_start < from)
1790 memset(kaddr + block_start, 0,
1791 from -
1792 block_start);
1793 flush_dcache_page(page);
1794 kunmap_atomic(kaddr, KM_USER0);
1795 }
1796 continue;
1797 }
1798 }
1799 /* @bh is mapped, set it uptodate if the page is uptodate. */
1800 if (PageUptodate(page)) {
1801 if (!buffer_uptodate(bh))
1802 set_buffer_uptodate(bh);
1803 continue;
1804 }
1805 /*
1806 * The page is not uptodate, but the buffer is mapped. If the buffer is
1807 * not uptodate and is only partially being written to, we need to read
1808 * it in before the write, i.e. right now.
1809 */
1810 if (!buffer_uptodate(bh) &&
1811 (block_start < from || block_end > to)) {
1812 ll_rw_block(READ, 1, &bh);
1813 *wait_bh++ = bh;
1814 }
1815 } while (block++, block_start = block_end,
1816 (bh = bh->b_this_page) != head);
1817
1818 /* Release the lock if we took it. */
1819 if (rl) {
1820 up_read(&ni->runlist.lock);
1821 rl = NULL;
1822 }
1823
1824 /* If we issued read requests, let them complete. */
1825 while (wait_bh > wait) {
1826 wait_on_buffer(*--wait_bh);
1827 if (!buffer_uptodate(*wait_bh))
1828 return -EIO;
1829 }
1830
1831 ntfs_debug("Done.");
1832 return 0;
1833err_out:
1834 /*
1835 * Zero out any newly allocated blocks to avoid exposing stale data.
1836 * If BH_New is set, we know that the block was newly allocated in the
1837 * above loop.
1838 * FIXME: What about initialized_size increments? Have we done all the
1839 * required zeroing above? If not this error handling is broken, and
1840 * in particular the if (block_end <= from) check is completely bogus.
1841 */
1842 bh = head;
1843 block_start = 0;
1844 is_retry = FALSE;
1845 do {
1846 block_end = block_start + blocksize;
1847 if (block_end <= from)
1848 continue;
1849 if (block_start >= to)
1850 break;
1851 if (buffer_new(bh)) {
1852 void *kaddr;
1853
1854 clear_buffer_new(bh);
1855 kaddr = kmap_atomic(page, KM_USER0);
1856 memset(kaddr + block_start, 0, bh->b_size);
1857 kunmap_atomic(kaddr, KM_USER0);
1858 set_buffer_uptodate(bh);
1859 mark_buffer_dirty(bh);
1860 is_retry = TRUE;
1861 }
1862 } while (block_start = block_end, (bh = bh->b_this_page) != head);
1863 if (is_retry)
1864 flush_dcache_page(page);
1865 if (rl)
1866 up_read(&ni->runlist.lock);
1867 return err;
1868}
1869
1870/**
1871 * ntfs_prepare_write - prepare a page for receiving data
1872 *
1873 * This is called from generic_file_write() with i_sem held on the inode
1874 * (@page->mapping->host). The @page is locked but not kmap()ped. The source
1875 * data has not yet been copied into the @page.
1876 *
1877 * Need to extend the attribute/fill in holes if necessary, create blocks and
1878 * make partially overwritten blocks uptodate.
1879 *
1880 * i_size is not to be modified yet.
1881 *
1882 * Return 0 on success or -errno on error.
1883 *
1884 * Should be using block_prepare_write() [support for sparse files] or
1885 * cont_prepare_write() [no support for sparse files]. Cannot do that due to
1886 * ntfs specifics but can look at them for implementation guidance.
1887 *
1888 * Note: In the range, @from is inclusive and @to is exclusive, i.e. @from is
1889 * the first byte in the page that will be written to and @to is the first byte
1890 * after the last byte that will be written to.
1891 */
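/*
 * Worked example (illustrative): with PAGE_CACHE_SIZE = 4096, a write of 200
 * bytes starting at file offset 4196 maps to page->index = 1 with @from = 100
 * and @to = 300, i.e. page bytes 100 to 299 inclusive receive the data.
 */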
1892static int ntfs_prepare_write(struct file *file, struct page *page,
1893 unsigned from, unsigned to)
1894{
1895 s64 new_size;
1896 loff_t i_size;
1897 struct inode *vi = page->mapping->host;
1898 ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1899 ntfs_volume *vol = ni->vol;
1900 ntfs_attr_search_ctx *ctx = NULL;
1901 MFT_RECORD *m = NULL;
1902 ATTR_RECORD *a;
1903 u8 *kaddr;
1904 u32 attr_len;
1905 int err;
1906
1907 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
1908 "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
1909 page->index, from, to);
1910 BUG_ON(!PageLocked(page));
1911 BUG_ON(from > PAGE_CACHE_SIZE);
1912 BUG_ON(to > PAGE_CACHE_SIZE);
1913 BUG_ON(from > to);
1914 BUG_ON(NInoMstProtected(ni));
1915 /*
1916 * If a previous ntfs_truncate() failed, repeat it and abort if it
1917 * fails again.
1918 */
1919 if (unlikely(NInoTruncateFailed(ni))) {
1920 down_write(&vi->i_alloc_sem);
1921 err = ntfs_truncate(vi);
1922 up_write(&vi->i_alloc_sem);
1923 if (err || NInoTruncateFailed(ni)) {
1924 if (!err)
1925 err = -EIO;
1926 goto err_out;
1927 }
1928 }
1929 /* If the attribute is not resident, deal with it elsewhere. */
1930 if (NInoNonResident(ni)) {
1931 /*
1932 * Only unnamed $DATA attributes can be compressed, encrypted,
1933 * and/or sparse.
1934 */
1935 if (ni->type == AT_DATA && !ni->name_len) {
1936 /* If file is encrypted, deny access, just like NT4. */
1937 if (NInoEncrypted(ni)) {
1938 ntfs_debug("Denying write access to encrypted "
1939 "file.");
1940 return -EACCES;
1941 }
1942 /* Compressed data streams are handled in compress.c. */
1943 if (NInoCompressed(ni)) {
1944 // TODO: Implement and replace this check with
1945 // return ntfs_write_compressed_block(page);
1946 ntfs_error(vi->i_sb, "Writing to compressed "
1947 "files is not supported yet. "
1948 "Sorry.");
1949 return -EOPNOTSUPP;
1950 }
1951 // TODO: Implement and remove this check.
1952 if (NInoSparse(ni)) {
1953 ntfs_error(vi->i_sb, "Writing to sparse files "
1954 "is not supported yet. Sorry.");
1955 return -EOPNOTSUPP;
1956 }
1957 }
1958 /* Normal data stream. */
1959 return ntfs_prepare_nonresident_write(page, from, to);
1960 }
1961 /*
1962 * Attribute is resident, implying it is not compressed, encrypted, or
1963 * sparse.
1964 */
1965 BUG_ON(page_has_buffers(page));
1966 new_size = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
1967 /* If we do not need to resize the attribute allocation we are done. */
1968 if (new_size <= i_size_read(vi))
1969 goto done;
1970 /* Map, pin, and lock the (base) mft record. */
1971 if (!NInoAttr(ni))
1972 base_ni = ni;
1973 else
1974 base_ni = ni->ext.base_ntfs_ino;
1975 m = map_mft_record(base_ni);
1976 if (IS_ERR(m)) {
1977 err = PTR_ERR(m);
1978 m = NULL;
1979 ctx = NULL;
1980 goto err_out;
1981 }
1982 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1983 if (unlikely(!ctx)) {
1984 err = -ENOMEM;
1985 goto err_out;
1986 }
1987 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1988 CASE_SENSITIVE, 0, NULL, 0, ctx);
1989 if (unlikely(err)) {
1990 if (err == -ENOENT)
1991 err = -EIO;
1992 goto err_out;
1993 }
1994 m = ctx->mrec;
1995 a = ctx->attr;
1996 /* The total length of the attribute value. */
1997 attr_len = le32_to_cpu(a->data.resident.value_length);
1998 /* Fix a possible earlier failure of ntfs_commit_write(). */
1999 i_size = i_size_read(vi);
2000 if (unlikely(attr_len > i_size)) {
2001 attr_len = i_size;
2002 a->data.resident.value_length = cpu_to_le32(attr_len);
2003 }
2004 /* If we do not need to resize the attribute allocation we are done. */
2005 if (new_size <= attr_len)
2006 goto done_unm;
2007 /* Check if new size is allowed in $AttrDef. */
2008 err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
2009 if (unlikely(err)) {
2010 if (err == -ERANGE) {
2011 ntfs_error(vol->sb, "Write would cause the inode "
2012 "0x%lx to exceed the maximum size for "
2013 "its attribute type (0x%x). Aborting "
2014 "write.", vi->i_ino,
2015 le32_to_cpu(ni->type));
2016 } else {
2017 ntfs_error(vol->sb, "Inode 0x%lx has unknown "
2018 "attribute type 0x%x. Aborting "
2019 "write.", vi->i_ino,
2020 le32_to_cpu(ni->type));
2021 err = -EIO;
2022 }
2023 goto err_out2;
2024 }
2025 /*
2026 * Extend the attribute record to be able to store the new attribute
2027 * size.
2028 */
2029 if (new_size >= vol->mft_record_size || ntfs_attr_record_resize(m, a,
2030 le16_to_cpu(a->data.resident.value_offset) +
2031 new_size)) {
2032 /* Not enough space in the mft record. */
2033 ntfs_error(vol->sb, "Not enough space in the mft record for "
2034 "the resized attribute value. This is not "
2035 "supported yet. Aborting write.");
2036 err = -EOPNOTSUPP;
2037 goto err_out2;
2038 }
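	/*
	 * For scale (illustrative): with the common 1024 byte mft record
	 * size, any resident value that would grow to 1024 bytes or more is
	 * rejected above and would first have to be converted to
	 * non-resident form, which is not implemented here yet.
	 */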
2039 /*
2040 * We have enough space in the mft record to fit the write. This
2041 * implies the attribute is smaller than the mft record and hence fits
2042 * in a single page, so page->index must be 0.
2043 */
2044 BUG_ON(page->index);
2045 /*
2046 * If the beginning of the write is past the old size, enlarge the
2047 * attribute value up to the beginning of the write and fill it with
2048 * zeroes.
2049 */
2050 if (from > attr_len) {
2051 memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
2052 attr_len, 0, from - attr_len);
2053 a->data.resident.value_length = cpu_to_le32(from);
2054 /* Zero the corresponding area in the page as well. */
2055 if (PageUptodate(page)) {
2056 kaddr = kmap_atomic(page, KM_USER0);
2057 memset(kaddr + attr_len, 0, from - attr_len);
2058 kunmap_atomic(kaddr, KM_USER0);
2059 flush_dcache_page(page);
2060 }
2061 }
2062 flush_dcache_mft_record_page(ctx->ntfs_ino);
2063 mark_mft_record_dirty(ctx->ntfs_ino);
2064 done_unm:
2065 ntfs_attr_put_search_ctx(ctx);
2066 unmap_mft_record(base_ni);
2067 /*
2068 * Because resident attributes are handled by memcpy() to/from the
2069 * corresponding MFT record, and because this form of i/o is byte
2070 * aligned rather than block aligned, there is no need to bring the
2071 * page uptodate here as in the non-resident case where we need to
2072 * bring the buffers straddled by the write uptodate before
2073 * generic_file_write() does the copying from userspace.
2074 *
2075 * We thus defer bringing the page region outside the written range
2076 * uptodate to ntfs_commit_write(), which makes the code simpler and
2077 * saves one atomic kmap, which is good.
2078 */
2079done:
2080 ntfs_debug("Done.");
2081 return 0;
2082err_out:
2083 if (err == -ENOMEM)
2084 ntfs_warning(vi->i_sb, "Error allocating memory required to "
2085 "prepare the write.");
2086 else {
2087 ntfs_error(vi->i_sb, "Resident attribute prepare write failed "
2088 "with error %i.", err);
2089 NVolSetErrors(vol);
2090 make_bad_inode(vi);
2091 }
2092err_out2:
2093 if (ctx)
2094 ntfs_attr_put_search_ctx(ctx);
2095 if (m)
2096 unmap_mft_record(base_ni);
2097 return err;
2098}
2099
2100/**
2101 * ntfs_commit_nonresident_write - commit a write to a non-resident attribute
2102 *
2103 */
2104static int ntfs_commit_nonresident_write(struct page *page,
2105 unsigned from, unsigned to)
2106{
2107 s64 pos = ((s64)page->index << PAGE_CACHE_SHIFT) + to;
2108 struct inode *vi = page->mapping->host;
2109 struct buffer_head *bh, *head;
2110 unsigned int block_start, block_end, blocksize;
2111 BOOL partial;
2112
2113 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
2114 "0x%lx, from = %u, to = %u.", vi->i_ino,
2115 NTFS_I(vi)->type, page->index, from, to);
2116 blocksize = 1 << vi->i_blkbits;
2117
2118 // FIXME: We need a whole slew of special cases in here for compressed
2119 // files for example...
2120 // For now, we know ntfs_prepare_write() would have failed so we can't
2121 // get here in any of the cases we would have to special-case, so this
2122 // is just a ripped-off, unrolled generic_commit_write().
2123
2124 bh = head = page_buffers(page);
2125 block_start = 0;
2126 partial = FALSE;
2127 do {
2128 block_end = block_start + blocksize;
2129 if (block_end <= from || block_start >= to) {
2130 if (!buffer_uptodate(bh))
2131 partial = TRUE;
2132 } else {
2133 set_buffer_uptodate(bh);
2134 mark_buffer_dirty(bh);
2135 }
2136 } while (block_start = block_end, (bh = bh->b_this_page) != head);
2137 /*
2138 * If this is a partial write which happened to make all buffers
2139 * uptodate then we can optimize away a bogus ->readpage() for the next
2140 * read(). Here we 'discover' whether the page went uptodate as a
2141 * result of this (potentially partial) write.
2142 */
2143 if (!partial)
2144 SetPageUptodate(page);
2145 /*
2146 * Not convinced about this at all. See disparity comment above. For
2147 * now we know ntfs_prepare_write() would have failed in the write
2148 * exceeds i_size case, so this will never trigger, which is fine.
2149 */
2150 if (pos > i_size_read(vi)) {
2151 ntfs_error(vi->i_sb, "Writing beyond the existing file size is "
2152 "not supported yet. Sorry.");
2153 return -EOPNOTSUPP;
2154 // vi->i_size = pos;
2155 // mark_inode_dirty(vi);
2156 }
2157 ntfs_debug("Done.");
2158 return 0;
2159}
2160
2161/**
2162 * ntfs_commit_write - commit the received data
2163 *
2164 * This is called from generic_file_write() with i_sem held on the inode
2165 * (@page->mapping->host). The @page is locked but not kmap()ped. The source
2166 * data has already been copied into the @page. ntfs_prepare_write() has been
2167 * called before the data copied and it returned success so we can take the
2168 * called before the data was copied, and it returned success, so we can
2169 * take the results of various BUG checks and some error handling for granted.
2170 * Need to mark modified blocks dirty so they get written out later when
2171 * ntfs_writepage() is invoked by the VM.
2172 *
2173 * Return 0 on success or -errno on error.
2174 *
2175 * Should be using generic_commit_write(). This marks buffers uptodate and
2176 * dirty, sets the page uptodate if all buffers in the page are uptodate, and
2177 * updates i_size if the end of io is beyond i_size. In that case, it also
2178 * marks the inode dirty.
2179 *
2180 * Cannot use generic_commit_write() due to ntfs specialities but can look at
2181 * it for implementation guidance.
2182 *
2183 * If things have gone as outlined in ntfs_prepare_write(), then we do not
2184 * need to do any page content modifications here at all, except in the write
2185 * to resident attribute case, where we need to do the uptodate bringing here
2186 * which we combine with the copying into the mft record which means we save
2187 * one atomic kmap.
2188 */
2189static int ntfs_commit_write(struct file *file, struct page *page,
2190 unsigned from, unsigned to)
2191{
2192 struct inode *vi = page->mapping->host;
2193 ntfs_inode *base_ni, *ni = NTFS_I(vi);
2194 char *kaddr, *kattr;
2195 ntfs_attr_search_ctx *ctx;
2196 MFT_RECORD *m;
2197 ATTR_RECORD *a;
2198 u32 attr_len;
2199 int err;
2200
2201 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
2202 "0x%lx, from = %u, to = %u.", vi->i_ino, ni->type,
2203 page->index, from, to);
2204 /* If the attribute is not resident, deal with it elsewhere. */
2205 if (NInoNonResident(ni)) {
2206 /* Only unnamed $DATA attributes can be compressed/encrypted. */
2207 if (ni->type == AT_DATA && !ni->name_len) {
2208 /* Encrypted files need separate handling. */
2209 if (NInoEncrypted(ni)) {
2210 // We never get here at present!
2211 BUG();
2212 }
2213 /* Compressed data streams are handled in compress.c. */
2214 if (NInoCompressed(ni)) {
2215 // TODO: Implement this!
2216 // return ntfs_write_compressed_block(page);
2217 // We never get here at present!
2218 BUG();
2219 }
2220 }
2221 /* Normal data stream. */
2222 return ntfs_commit_nonresident_write(page, from, to);
2223 }
2224 /*
2225 * Attribute is resident, implying it is not compressed, encrypted, or
2226 * sparse.
2227 */
2228 if (!NInoAttr(ni))
2229 base_ni = ni;
2230 else
2231 base_ni = ni->ext.base_ntfs_ino;
2232 /* Map, pin, and lock the mft record. */
2233 m = map_mft_record(base_ni);
2234 if (IS_ERR(m)) {
2235 err = PTR_ERR(m);
2236 m = NULL;
2237 ctx = NULL;
2238 goto err_out;
2239 }
2240 ctx = ntfs_attr_get_search_ctx(base_ni, m);
2241 if (unlikely(!ctx)) {
2242 err = -ENOMEM;
2243 goto err_out;
2244 }
2245 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2246 CASE_SENSITIVE, 0, NULL, 0, ctx);
2247 if (unlikely(err)) {
2248 if (err == -ENOENT)
2249 err = -EIO;
2250 goto err_out;
2251 }
2252 a = ctx->attr;
2253 /* The total length of the attribute value. */
2254 attr_len = le32_to_cpu(a->data.resident.value_length);
2255 BUG_ON(from > attr_len);
2256 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
2257 kaddr = kmap_atomic(page, KM_USER0);
2258 /* Copy the received data from the page to the mft record. */
2259 memcpy(kattr + from, kaddr + from, to - from);
2260 /* Update the attribute length if necessary. */
2261 if (to > attr_len) {
2262 attr_len = to;
2263 a->data.resident.value_length = cpu_to_le32(attr_len);
2264 }
2265 /*
2266 * If the page is not uptodate, bring the out of bounds area(s)
2267 * uptodate by copying data from the mft record to the page.
2268 */
2269 if (!PageUptodate(page)) {
2270 if (from > 0)
2271 memcpy(kaddr, kattr, from);
2272 if (to < attr_len)
2273 memcpy(kaddr + to, kattr + to, attr_len - to);
2274 /* Zero the region outside the end of the attribute value. */
2275 if (attr_len < PAGE_CACHE_SIZE)
2276 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
2277 /*
2278 * The probability of not having done any of the above is
2279 * extremely small, so we just flush unconditionally.
2280 */
2281 flush_dcache_page(page);
2282 SetPageUptodate(page);
2283 }
2284 kunmap_atomic(kaddr, KM_USER0);
2285 /* Update i_size if necessary. */
2286 if (i_size_read(vi) < attr_len) {
2287 unsigned long flags;
2288
2289 write_lock_irqsave(&ni->size_lock, flags);
2290 ni->allocated_size = ni->initialized_size = attr_len;
2291 i_size_write(vi, attr_len);
2292 write_unlock_irqrestore(&ni->size_lock, flags);
2293 }
2294 /* Mark the mft record dirty, so it gets written back. */
2295 flush_dcache_mft_record_page(ctx->ntfs_ino);
2296 mark_mft_record_dirty(ctx->ntfs_ino);
2297 ntfs_attr_put_search_ctx(ctx);
2298 unmap_mft_record(base_ni);
2299 ntfs_debug("Done.");
2300 return 0;
2301err_out:
2302 if (err == -ENOMEM) {
2303 ntfs_warning(vi->i_sb, "Error allocating memory required to "
2304 "commit the write.");
2305 if (PageUptodate(page)) {
2306 ntfs_warning(vi->i_sb, "Page is uptodate, setting "
2307 "dirty so the write will be retried "
2308 "later on by the VM.");
2309 /*
2310 * Put the page on mapping->dirty_pages, but leave its
2311 * buffers' dirty state as-is.
2312 */
2313 __set_page_dirty_nobuffers(page);
2314 err = 0;
2315 } else
2316 ntfs_error(vi->i_sb, "Page is not uptodate. Written "
2317 "data has been lost.");
2318 } else {
2319 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
2320 "with error %i.", err);
2321 NVolSetErrors(ni->vol);
2322 make_bad_inode(vi);
2323 }
2324 if (ctx)
2325 ntfs_attr_put_search_ctx(ctx);
2326 if (m)
2327 unmap_mft_record(base_ni);
2328 return err;
2329}
2330
2331#endif /* NTFS_RW */
2332
2333/**
2334 * ntfs_aops - general address space operations for inodes and attributes
2335 */
2336struct address_space_operations ntfs_aops = {
2337 .readpage = ntfs_readpage, /* Fill page with data. */
2338 .sync_page = block_sync_page, /* Currently, just unplugs the
2339 disk request queue. */
2340#ifdef NTFS_RW
2341 .writepage = ntfs_writepage, /* Write dirty page to disk. */
2342 .prepare_write = ntfs_prepare_write, /* Prepare page and buffers
2343 ready to receive data. */
2344 .commit_write = ntfs_commit_write, /* Commit received data. */
2345#endif /* NTFS_RW */
2346};
2347
2348/**
2349 * ntfs_mst_aops - general address space operations for mst protected inodes
2350 * and attributes
2351 */
2352struct address_space_operations ntfs_mst_aops = {
2353 .readpage = ntfs_readpage, /* Fill page with data. */
2354 .sync_page = block_sync_page, /* Currently, just unplugs the
2355 disk request queue. */
2356#ifdef NTFS_RW
2357 .writepage = ntfs_writepage, /* Write dirty page to disk. */
2358 .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
2359 without touching the buffers
2360 belonging to the page. */
2361#endif /* NTFS_RW */
2362};
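/*
 * A minimal sketch of how these tables get installed (illustrative; the real
 * assignments live in inode.c and depend on the inode and attribute type):
 *
 *	if (NInoMstProtected(ni))
 *		VFS_I(ni)->i_mapping->a_ops = &ntfs_mst_aops;
 *	else
 *		VFS_I(ni)->i_mapping->a_ops = &ntfs_aops;
 */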
2363
2364#ifdef NTFS_RW
2365
2366/**
2367 * mark_ntfs_record_dirty - mark an ntfs record dirty
2368 * @page: page containing the ntfs record to mark dirty
2369 * @ofs: byte offset within @page at which the ntfs record begins
2370 *
2371 * Set the buffers and the page in which the ntfs record is located dirty.
2372 *
2373 * The latter also marks the vfs inode the ntfs record belongs to dirty
2374 * (I_DIRTY_PAGES only).
2375 *
2376 * If the page does not have buffers, we create them and set them uptodate.
2377 * The page may not be locked, which is why we need to handle the buffers under
2378 * the mapping->private_lock. Once the buffers are marked dirty we no longer
2379 * need the lock since try_to_free_buffers() does not free dirty buffers.
2380 */
2381void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
2382 struct address_space *mapping = page->mapping;
2383 ntfs_inode *ni = NTFS_I(mapping->host);
2384 struct buffer_head *bh, *head, *buffers_to_free = NULL;
2385 unsigned int end, bh_size, bh_ofs;
2386
2387 BUG_ON(!PageUptodate(page));
2388 end = ofs + ni->itype.index.block_size;
2389 bh_size = 1 << VFS_I(ni)->i_blkbits;
2390 spin_lock(&mapping->private_lock);
2391 if (unlikely(!page_has_buffers(page))) {
2392 spin_unlock(&mapping->private_lock);
2393 bh = head = alloc_page_buffers(page, bh_size, 1);
2394 spin_lock(&mapping->private_lock);
2395 if (likely(!page_has_buffers(page))) {
2396 struct buffer_head *tail;
2397
2398 do {
2399 set_buffer_uptodate(bh);
2400 tail = bh;
2401 bh = bh->b_this_page;
2402 } while (bh);
2403 tail->b_this_page = head;
2404 attach_page_buffers(page, head);
2405 } else
2406 buffers_to_free = bh;
2407 }
2408 bh = head = page_buffers(page);
2409 BUG_ON(!bh);
2410 do {
2411 bh_ofs = bh_offset(bh);
2412 if (bh_ofs + bh_size <= ofs)
2413 continue;
2414 if (unlikely(bh_ofs >= end))
2415 break;
2416 set_buffer_dirty(bh);
2417 } while ((bh = bh->b_this_page) != head);
2418 spin_unlock(&mapping->private_lock);
2419 __set_page_dirty_nobuffers(page);
2420 if (unlikely(buffers_to_free)) {
2421 do {
2422 bh = buffers_to_free->b_this_page;
2423 free_buffer_head(buffers_to_free);
2424 buffers_to_free = bh;
2425 } while (buffers_to_free);
2426 }
2427}
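/*
 * Typical usage (a sketch; the real call sites are elsewhere in the driver):
 * after modifying an mst protected ntfs record that lives in @page at byte
 * offset @ofs, a caller does:
 *
 *	mark_ntfs_record_dirty(page, ofs);
 *
 * which dirties the record's buffers and the page, so the change is later
 * written out by ntfs_writepage().
 */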
2428
2429#endif /* NTFS_RW */