// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/slab.h>
#include <linux/kernel.h>

/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP
 * to manage the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

/* 16M */
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
/* 16G */
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
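/*
 * Illustrative arithmetic for the clump logic above (not from the
 * original source): with the default bounds, requests up to 16M round
 * up to a 64K boundary (clump = 1 << 16). In the middle range the
 * clump grows with the size: for size = 1G, size >> 24 == 64, so
 * align_shift = 15 + __ffs(64) = 21 and the size is rounded up to a
 * 2M boundary (1G + 1 would become 1G + 2M). Requests of 16G and
 * above round up to a 64M boundary.
 */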
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}
/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);
			}
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}
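/*
 * Note (illustrative, not from the original source): calling
 * run_deallocate_ex() with sbi == NULL is a dry run - nothing is
 * freed on disk, but *done still accumulates how many real clusters
 * the range covers. attr_punch_hole() below relies on this to measure
 * a hole before making any destructive change.
 */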
/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn, CLST *new_len)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space' */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}
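/*
 * Illustrative usage (an assumption, mirroring the calls made later in
 * this file): allocate 'len' clusters for 'vcn', preferring to place
 * them right after cluster 'hint', with no preallocation and no limit
 * on the number of fragments:
 *
 *	CLST alen;
 *	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, len, NULL,
 *				     ALLOCATE_DEF, &alen, 0, NULL, NULL);
 *
 * On success 'alen' is the number of clusters actually added to @run.
 */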
/*
 * attr_make_nonresident
 *
 * If page is not NULL - it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes nonresident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim new allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}
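/*
 * Note (illustrative, not from the original source): for the unnamed
 * $DATA attribute the old resident payload is copied into page 0 of
 * the page cache and marked dirty, so writeback lands on the freshly
 * allocated clusters; other attributes are written synchronously via
 * ntfs_sb_write_run(). The kmemdup() copy of the attribute is what the
 * out3 path uses to put the record back if insertion fails.
 */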
/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
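/*
 * Worked example (illustrative): growing a resident payload from
 * rsize = 100 to new_size = 120 bytes gives
 * dsize = ALIGN(120, 8) - ALIGN(100, 8) = 120 - 104 = 16,
 * so everything after the attribute shifts right by 16 bytes, the gap
 * is zeroed, and bytes [100, 120) of the payload are zero-filled. If
 * used + dsize would exceed max_bytes_per_attr, the attribute is
 * converted to nonresident instead.
 */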
/*
 * attr_set_size - Change the size of attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	bool dirty = false;
	u32 align;
	struct MFT_REC *rec;

again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In simple case we have to:
		 *  - allocate space (vcn, lcn, len)
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocate for sparse/compress. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
						    sbi,
						    get_pre_allocated(new_size)) -
					    new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
				NULL, NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT to avoid recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in consistent state.
		 * Save this point to restore to if next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In simple case we have to:
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 *  - mark and trim clusters as free (vcn, lcn, len)
		 */
		u64 dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with last entry (vcn==0)
			 * and it is not first in entries array
			 * (list entry for std attribute always first).
			 * So it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return err;

undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
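/*
 * Illustrative usage (an assumption, mirroring the call made from
 * attr_collapse_range() below): truncate the unnamed $DATA attribute
 * to 'vbo' bytes while keeping any preallocated tail clusters:
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
 *			    &new_valid, true, NULL);
 */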
/*
 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 *
 * @new == NULL means just to get current mapping for 'vcn'
 * @new != NULL means allocate real cluster if 'vcn' maps to hole
 * @zero - zeroout new allocated clusters
 *
 * NOTE:
 * - @new != NULL is called only for sparsed or compressed attributes.
 * - new allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	CLST alloc, evcn;
	unsigned int fr;
	u64 total_size, total_size0;
	int step = 0;

	if (new)
		*new = false;

	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);

	if (*len) {
		if (*lcn != SPARSE_LCN || !new)
			return 0; /* Fast normal way without allocation. */
		else if (clen > *len)
			clen = *len;
	}

	/* No cluster in cache or we need to allocate cluster in hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		if (new) {
			err = -EINVAL;
		} else {
			*len = 1;
			*lcn = SPARSE_LCN;
		}
		goto out;
	}

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	/* Load in cache actual information. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!*len) {
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
			if (*lcn != SPARSE_LCN || !new)
				goto ok; /* Slow normal way without allocation. */

			if (clen > *len)
				clen = *len;
		} else if (!new) {
			/* Here we may return -ENOENT.
			 * In any case caller gets zero length. */
			goto ok;
		}
	}

	if (!is_attr_ext(attr_b)) {
		/* The code below only for sparsed or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/* Allocate frame aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
		zero = true;

		/* Check if 'vcn' and 'vcn0' in different attribute segments. */
		if (vcn < svcn || evcn1 <= vcn) {
			/* Load attribute for truncated vcn. */
			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
					    &vcn, &mi);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
			err = attr_load_runs(attr, ni, run, NULL);
			if (err)
				goto out;
		}
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	/* Allocate and zeroout new clusters. */
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;
	step = 1;

	end = vcn + alen;
	/* Save 'total_size0' to restore if error. */
	total_size0 = le64_to_cpu(attr_b->nres.total_size);
	total_size = total_size0 + ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check case when vcn0 + len overlaps new allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto undo1;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto undo1;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	/*
	 * The code below may require additional cluster (to extend attribute list)
	 * and / or one MFT record
	 * It is too complex to undo operations if -ENOSPC occurs deep inside
	 * in 'ni_insert_nonresident'.
	 * Return in advance -ENOSPC here if there are no free cluster and no free MFT.
	 */
	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
		/* Undo step 1. */
		err = -ENOSPC;
		goto undo1;
	}

	step = 2;
	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (!attr) {
		/* Insert new attribute segment. */
		goto ins_ext;
	}

	/* Try to update existing attribute segment. */
	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
	evcn = le64_to_cpu(attr->nres.evcn);

	if (end < next_svcn)
		end = next_svcn;
	while (end > evcn) {
		/* Remove segment [svcn : evcn). */
		mi_remove_attr(NULL, mi, attr);

		if (!al_remove_le(ni, le)) {
			err = -EINVAL;
			goto out;
		}

		if (evcn + 1 >= alloc) {
			/* Last attribute segment. */
			evcn1 = evcn + 1;
			goto ins_ext;
		}

		if (ni_load_mi(ni, le, &mi)) {
			attr = NULL;
			goto out;
		}

		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	if (end < svcn)
		end = svcn;

	err = attr_load_runs(attr, ni, run, &end);
	if (err)
		goto out;

	evcn1 = evcn + 1;
	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
	if (err)
		goto out;

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;
	mi->dirty = true;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (err && step > 1) {
		/* Too complex to restore. */
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	up_write(&ni->file.run_lock);

	return err;

undo1:
	/* Undo step 1. */
	attr_b->nres.total_size = cpu_to_le64(total_size0);
	inode_set_bytes(&ni->vfs_inode, total_size0);

	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	goto out;
}
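/*
 * Illustrative usage (an assumption, not from this file): a lookup-only
 * caller passes new == NULL and simply gets the current mapping,
 *
 *	CLST lcn, len;
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, NULL, false);
 *
 * while a write into a hole of a sparse/compressed file passes a real
 * 'new' pointer (and zero == true) to get freshly allocated, zeroed
 * clusters.
 */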
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}
/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}
/*
 * attr_load_runs_range - Load runs for given range [from to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}
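/*
 * Worked example (illustrative): with 4K clusters (cluster_bits == 12),
 * a request for bytes [0x3000, 0x12000) walks vcn 3 through
 * vcn_last = (0x12000 - 1) >> 12 = 17, loading runs only where
 * run_lookup_entry() misses.
 */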
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err = 0;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif
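/*
 * Worked example (illustrative): for a WOF-compressed file smaller
 * than 4G the attribute starts with an array of __le32 frame-end
 * offsets. For frame == 10, vbo[1] = 40: the entry at byte 36 gives
 * where frame 10 starts (off[0]) and the entry at byte 40 gives where
 * it ends (off[1]), so *vbo_data = frames * 4 + off[0] and
 * *ondisk_size = off[1] - off[0].
 */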
/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparsed frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed 'cause
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data_clusters + sparse_clusters =
				 * not enough for frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
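/*
 * Worked example (illustrative): with c_unit == 4 a frame spans
 * 16 clusters. If the frame's runs map 10 real clusters followed by
 * 6 sparse ones, *clst_data ends up as 10 and slen as 6;
 * 10 + 6 >= 16 with slen != 0 means the frame is compressed. A first
 * run of 16+ real clusters would instead report an uncompressed frame
 * with *clst_data = clst_frame.
 */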
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(valid_size);
		mi_b->dirty = true;
	}

	return err;
}
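/*
 * Note (illustrative): a frame is 1 << NTFS_LZNT_CUNIT == 16 clusters,
 * so vcn = frame << NTFS_LZNT_CUNIT picks the frame start. When the
 * compressed payload shrinks (len < clst_data) the tail clusters are
 * freed and replaced by a sparse entry; when it grows, extra clusters
 * are allocated right after the frame's last mapped cluster.
 */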
/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to collapse only cluster aligned ranges. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCN- */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
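/*
 * Worked example (illustrative): on a compressed attribute with 4K
 * clusters and c_unit == 4, mask = (4096 << 4) - 1 = 0xFFFF, so both
 * vbo and bytes must be multiples of 64K (one compression frame);
 * plain sparse/normal attributes only require cluster alignment.
 */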
/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;
	struct runs_tree run2;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	run_init(&run2);
	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	hole = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;

		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check range [vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
		if (err)
			goto done;

		/* Check if required range is already hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of run to undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make a hole range (sparse) [vcn1 + zero). */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}

		/* Update run in attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* Layout of records may be changed. */
			attr_b = NULL;
		}

		/* Real deallocate. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore packed runs.
	 * 'mi_pack_runs' should not fail because we restore the original.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}
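/*
 * Note (illustrative): when the range is not frame aligned and the
 * caller supplied frame_size, the function reports the required
 * granularity instead of failing hard - *frame_size = mask + 1 and
 * E_NTFS_NOTALIGNED is returned - so a caller (e.g. the fallocate
 * path) can zero the partial frames itself and retry with an aligned
 * range.
 */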
/*
 * attr_insert_range - Insert range (hole) in file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* It was checked above. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Insert range after the file size is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to insert only frame aligned ranges. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size
	 * Check alloc_size for maximum possible.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EOPNOTSUPP;

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err)
			goto out;

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			memmove(data + bytes, data, bytes);
			memset(data, 0, bytes);
			goto done;
		}

		/* A resident file becomes nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack in current record as much as possible. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. Try to undo. */
			goto undo_insert_range;
		}
	}

done:
	/*
	 * Update primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->valid may not equal valid_size (temporary). */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_insert_range:
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr)
			goto bad_inode;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (attr_load_runs(attr, ni, run, NULL))
		goto bad_inode;

	if (!run_collapse_range(run, vcn, len))
		goto bad_inode;

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
		goto bad_inode;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}