// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
 * preallocate algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

/* 16M */
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
/* 16G */
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
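/*
 * With the defaults above NTFS_CLUMP_MIN is 1 << 24 (16M) and NTFS_CLUMP_MAX
 * is 1ull << 34 (16G).  get_pre_allocated() below rounds a requested size up
 * to the current clump, e.g. a 100K request is preallocated up to the next
 * 64K boundary (128K), while sizes above 16M use progressively larger clumps.
 */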
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
					 enum ATTR_TYPE type)
{
	const struct ATTR_DEF_ENTRY *de;

	de = ntfs_query_def(sbi, type);
	if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
		return true;

	return false;
}
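/*
 * Nonresident attribute segments describe VCNs [nres.svcn : nres.evcn];
 * the packed run list starts nres.run_off bytes from the attribute header.
 */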
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}
/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (lcn != SPARSE_LCN) {
			mark_as_free_ex(sbi, lcn, clen, trim);
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			run_truncate(run, vcn0);
			err = -EINVAL;
			break;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}
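/*
 * Note: when 'pre_alloc' is passed, the allocator below first tries to grab
 * 'len + pre' clusters in one piece and retries without the preallocation
 * (pre = 0) if that fails with -ENOSPC.
 */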
/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			/* Retry without preallocation. */
			pre = 0;
			continue;
		}

		if (err)
			goto out;

		if (new_lcn && vcn == vcn0)
			*new_lcn = lcn;

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		vcn += flen;

		if (flen >= len || opt == ALLOCATE_MFT ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space' */
	run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
	run_truncate(run, vcn0);

	return err;
}
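/*
 * A resident attribute keeps its data inside the MFT record itself; making it
 * nonresident moves the data to newly allocated clusters described by a run
 * list, which is what attr_make_nonresident() below implements.
 */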
/*
 * attr_make_nonresident
 *
 * If page is not NULL - it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec = mi->mrec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	sbi = mi->sbi;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non resident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim new allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}
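/*
 * attr_set_size_res() resizes the resident payload in place: the record tail
 * after the attribute is shifted by 'dsize' (the 8-byte aligned delta), and a
 * growing payload is switched to nonresident once it would no longer fit in
 * sbi->max_bytes_per_attr.
 */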
/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
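/*
 * attr_set_size() below handles both directions: growing allocates new
 * clusters (for sparse/compressed attributes it just maps SPARSE_LCN) and
 * packs them into the mapping pairs of 'mi', inserting extra attribute
 * segments when a record fills up; shrinking truncates the run, frees
 * clusters via run_deallocate_ex() and updates data/alloc/valid sizes.
 */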
/*
 * attr_set_size - Change the size of attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	CLST to_allocate;
	u32 align;
	struct MFT_REC *rec;

again:
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res)
			goto ok1;

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

again_1:
	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		mi = mi_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;
	if (new_size > old_size) {
		/* Grow. */
		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In simple case we have to:
		 * - allocate space (vcn, lcn, len)
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocate for sparse/compress. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			size_t free;

			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
						    sbi,
						    get_pre_allocated(
							    new_size)) -
					    new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL))
				lcn = SPARSE_LCN;

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : 0, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) / 3 + 1,
				NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto out;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT to avoid recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto out;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error layout of records is not changed. */
			if (err)
				goto out;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records maybe changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in consistency state.
		 * Save this point to restore to if next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = true;
		goto again_1;
	}
	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In simple case we have to:
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 * - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with last entry (vcn==0)
			 * and it is not first in entries array
			 * (list entry for std attribute always first).
			 * So it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}
ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	/* Update inode_set_bytes. */
	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		bool dirty = false;

		if (ni->vfs_inode.i_size != new_size) {
			ni->vfs_inode.i_size = new_size;
			dirty = true;
		}

		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return err;

undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		mi = mi_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
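/*
 * attr_data_get_block() maps 'vcn' to an on-disk 'lcn'/'len' pair; when 'new'
 * is passed and the range is sparse it also allocates clusters (rounded up to
 * a whole compression frame) and repacks the affected attribute segments.
 */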
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
	u64 total_size;
	u32 clst_per_frame;
	bool ok;

	if (new)
		*new = false;

	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	up_read(&ni->file.run_lock);

	if (ok && (*lcn != SPARSE_LCN || !new)) {
		/* Normal way. */
		return 0;
	}

	if (!clen)
		clen = 1;

	if (ok && clen > *len)
		clen = *len;

	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		err = -EINVAL;
		goto out;
	}

	clst_per_frame = 1u << attr_b->nres.c_unit;
	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!ok) {
		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
		if (ok && (*lcn != SPARSE_LCN || !new)) {
			/* Normal way. */
			err = 0;
			goto ok;
		}

		if (!ok && !new) {
			*len = 0;
			err = 0;
			goto ok;
		}

		if (ok && clen > *len) {
			clen = *len;
			to_alloc = (clen + clst_per_frame - 1) &
				   ~(clst_per_frame - 1);
		}
	}
	if (!is_attr_ext(attr_b)) {
		err = -EINVAL;
		goto out;
	}

	/* Get the last LCN to allocate from. */
	hint = 0;
	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	err = attr_allocate_clusters(
		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
		lcn);
	if (err)
		goto out;
	*new = true;

	end = vcn + *len;

	total_size = le64_to_cpu(attr_b->nres.total_size) +
		     ((u64)*len << cluster_bits);

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}

		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;

			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				err = -EINVAL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	up_write(&ni->file.run_lock);

	return err;
}
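/*
 * For resident data the payload lives in the MFT record, so reading/writing a
 * page is a memcpy() between the record and the page cache, bounded by
 * res.data_size (see the two helpers below).
 */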
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}
/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;

	return 0;
}
/*
 * attr_load_runs_range - Load runs for given range [from to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must be success. */
		}
	}

	return 0;
}
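/*
 * WOF (Windows Overlay Filter) compressed files start with an array of 32-bit
 * or 64-bit end offsets, one per frame; frame 'i' occupies
 * [offset[i - 1] : offset[i]) in the compressed stream, which is what
 * attr_wof_frame_info() below reads.
 */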
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err = 0;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}

		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif
/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparsed frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed 'cause
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data_clusters + sparse_clusters =
				 * not enough for frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There is no sparsed clusters in this frame
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			}
			/* Frame is compressed. */
			break;
		}
	}

	return 0;
}
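/*
 * A compression frame covers 1 << attr->nres.c_unit clusters; a frame whose
 * mapped data clusters plus trailing sparse clusters add up to a full frame
 * is stored compressed, which is what the check above reports via 'clst_data'.
 */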
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, lcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;

		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL, 0,
					     &alen, 0, &lcn);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}

		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;

			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				err = -EINVAL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);

	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
		mi_b->dirty = true;
	}
out:
	return err;
}
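/*
 * attr_collapse_range() removes [vbo : vbo + bytes) from the file; the range
 * must be aligned to the cluster (or compression frame) mask, otherwise the
 * request is rejected.
 */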
/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to collapse only cluster aligned ranges. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCN of this segment down by 'len'. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;

			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records maybe changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
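/*
 * attr_punch_hole() only deallocates whole frames: if the requested range is
 * not aligned to the frame mask it reports the frame size back through
 * 'frame_size' and returns E_NTFS_NOTALIGNED so the caller can zero the
 * partial frames itself.
 */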
/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, dealloc, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto done;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto done;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, dealloc2;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;

		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		dealloc2 = dealloc;
		err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
		if (err)
			goto done;

		if (dealloc2 == dealloc) {
			/* Looks like the required range is already sparsed. */
		} else {
			if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
					   false)) {
				err = -ENOMEM;
				goto done;
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
			if (err)
				goto done;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn < evcn1) {
				err = ni_insert_nonresident(ni, ATTR_DATA, NULL,
							    0, run, next_svcn,
							    evcn1 - next_svcn,
							    a_flags, &attr, &mi,
							    &le);
				if (err)
					goto done;

				/* Layout of records maybe changed. */
				attr_b = NULL;
			}
		}
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto done;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	total_size -= (u64)dealloc << sbi->cluster_bits;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto done;
		}
	}

	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

done:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
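/*
 * attr_insert_range() makes room by shifting everything after the hole:
 * run_insert_range() adds the hole to the run list and each following
 * attribute segment gets its svcn/evcn moved up by 'len' clusters.
 */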
/*
 * attr_insert_range - Insert range (hole) in file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* It was checked above. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Insert range after the file size is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to insert only frame aligned ranges. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size
	 * Check alloc_size for maximum possible.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EOPNOTSUPP;

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, &attr);
		if (err)
			goto out;

		if (!attr->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr, attr->res.data_off);

			memmove(data + bytes, data, data_size);
			memset(data, 0, bytes);
			goto done;
		}

		/* Resident files becomes nonresident. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (!attr_b->non_res) {
			err = -EINVAL;
			goto bad_inode;
		}

		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack in current record as much as possible. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	run_truncate_head(run, next_svcn);

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	/*
	 * Update primary attribute segment in advance.
	 * pointer attr_b may become invalid (layout of mft is changed).
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->valid may be not equal valid_size (temporary). */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);
		if (err)
			goto out;
	}

done:
	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;
}