// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
/*
 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
 * the preallocate algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

/* 16M */
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
/* 16G */
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
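/*
 * Worked example (illustrative only, using the default clump bounds
 * above): for size = 32M, size >> (8 + 16) == 2 and __ffs(2) == 1, so
 * align_shift == 16 - 1 + 1 == 16 and clump == 64K; the result rounds
 * size up to the next 64K boundary. Sizes <= NTFS_CLUMP_MIN likewise
 * round to 64K units, while sizes >= NTFS_CLUMP_MAX round to 64M units.
 */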
/*
 * attr_must_be_resident
 *
 * Return: True if attribute must be resident.
 */
static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
					 enum ATTR_TYPE type)
{
	const struct ATTR_DEF_ENTRY *de;

	switch (type) {
	case ATTR_STD:
	case ATTR_NAME:
	case ATTR_ID:
	case ATTR_LABEL:
	case ATTR_VOL_INFO:
	case ATTR_ROOT:
	case ATTR_EA_INFO:
		return true;
	default:
		de = ntfs_query_def(sbi, type);
		if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
			return true;
		return false;
	}
}
/*
 * attr_load_runs - Load all runs stored in @attr.
 */
int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
		   struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}
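/*
 * Usage sketch (not a call that appears at this point in the file):
 * after locating a non-resident attribute segment, decode its run list
 * into @run before translating vcn to lcn:
 *
 *	err = attr_load_runs(attr, ni, run, NULL);
 *	if (!err)
 *		ok = run_lookup_entry(run, vcn, &lcn, &clen, NULL);
 *
 * A non-NULL @vcn both validates that this segment covers *vcn and makes
 * run_unpack_ex() start storing mapped entries at *vcn instead of svcn.
 */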
/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			mark_as_free_ex(sbi, lcn, clen, trim);
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}
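/*
 * Note: @done is optional; when supplied it accumulates only the number
 * of clusters actually freed (sparse entries are skipped). Callers such
 * as attr_punch_hole() below compare @done before and after a call to
 * detect whether the requested range was already sparse.
 */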
/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (new_lcn && vcn == vcn0)
			*new_lcn = lcn;

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
			wnd_set_free(wnd, lcn, flen);
			up_write(&wnd->rw_lock);
			err = -ENOMEM;
			goto out;
		}

		vcn += flen;

		if (flen >= len || opt == ALLOCATE_MFT ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space' */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}
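/*
 * Call sketch (hypothetical values): reserve 8 clusters for vcn 0,
 * letting the allocator choose the location and allowing at most 16 new
 * fragments:
 *
 *	CLST alen, lcn0;
 *	err = attr_allocate_clusters(sbi, run, 0, 0, 8, NULL,
 *				     ALLOCATE_DEF, &alen, 16, &lcn0);
 *
 * On success @alen holds the count actually reserved; it can be less
 * than requested when the fragment limit @fr stops the loop early.
 */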
/*
 * attr_make_nonresident
 *
 * If page is not NULL - it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non resident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim new allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}
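/*
 * The conversion above is transactional in spirit: clusters are
 * allocated and the resident payload copied out first, the resident
 * attribute is removed from the record only afterwards, and the
 * out3/out2/out1 unwind path restores the original attribute and frees
 * the new clusters if the non-resident insertion fails.
 */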
/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
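/*
 * Example (illustrative): growing a resident attribute from rsize = 10
 * to new_size = 20 gives dsize = ALIGN(20, 8) - ALIGN(10, 8) = 8, so the
 * record tail shifts right by 8 bytes and the new bytes are zeroed. When
 * the record would exceed max_bytes_per_attr, the attribute is converted
 * to non-resident via attr_make_nonresident() instead.
 */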
/*
 * attr_set_size - Change the size of attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext;
	u32 align;
	struct MFT_REC *rec;

again:
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err || !attr_b->non_res)
			goto out;

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);

again_1:
	align = sbi->cluster_size;

	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

next_le:
	rec = mi->mrec;

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = true;
			goto ok;
		}

		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			lcn = SPARSE_LCN;
		} else if (is_ext) {
			/* No preallocate for sparse/compress. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				CLST new_alen2 = bytes_to_cluster(
					sbi, get_pre_allocated(new_size));
				pre_alloc = new_alen2 - new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : 0, &alen,
				is_mft ? 0
				       : (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
				NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto out;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT to avoid recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto out;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		le_b = NULL;
		/*
		 * Layout of records maybe changed.
		 * Find base attribute to update.
		 */
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
		attr_b->nres.data_size = attr_b->nres.alloc_size;
		attr_b->nres.valid_size = attr_b->nres.alloc_size;
		mi_b->dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		alen = 0;
		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
					true);
		if (err)
			goto out;

		run_truncate(run, vcn);

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with last entry (vcn==0)
			 * and it is not first in entries array
			 * (list entry for std attribute always first).
			 * So it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}

		if (is_ext)
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)alen << cluster_bits));

		mi_b->dirty = true;

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto out;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

out:
	if (!err && attr_b && ret)
		*ret = attr_b;

	/* Update inode_set_bytes. */
	if (!err && ((type == ATTR_DATA && !name_len) ||
		     (type == ATTR_ALLOC && name == I30_NAME))) {
		bool dirty = false;

		if (ni->vfs_inode.i_size != new_size) {
			ni->vfs_inode.i_size = new_size;
			dirty = true;
		}

		if (attr_b && attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return err;
}
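/*
 * Usage sketch (hypothetical sizes): extend the unnamed DATA attribute
 * to 1M while clamping the valid size to its current value:
 *
 *	u64 new_valid = ni->i_valid;
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
 *			    0x100000, &new_valid, false, NULL);
 *
 * @keep_prealloc only matters when shrinking; on extension, plain files
 * also receive the preallocation computed by get_pre_allocated().
 */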
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
	u64 total_size;
	u32 clst_per_frame;
	bool ok;

	if (new)
		*new = false;

	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	up_read(&ni->file.run_lock);

	if (ok && (*lcn != SPARSE_LCN || !new)) {
		/* Normal way. */
		return 0;
	}

	if (!clen)
		clen = 1;

	if (ok && clen > *len)
		clen = *len;

	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
	if (vcn >= asize) {
		err = -EINVAL;
		goto out;
	}

	clst_per_frame = 1u << attr_b->nres.c_unit;
	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!ok) {
		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
		if (ok && (*lcn != SPARSE_LCN || !new)) {
			/* Normal way. */
			err = 0;
			goto ok;
		}

		if (!ok && !new) {
			*len = 0;
			err = 0;
			goto ok;
		}

		if (ok && clen > *len) {
			clen = *len;
			to_alloc = (clen + clst_per_frame - 1) &
				   ~(clst_per_frame - 1);
		}
	}

	if (!is_attr_ext(attr_b)) {
		err = -EINVAL;
		goto out;
	}

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	err = attr_allocate_clusters(
		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
		lcn);
	if (err)
		goto out;
	*new = true;

	end = vcn + *len;

	total_size = le64_to_cpu(attr_b->nres.total_size) +
		     ((u64)*len << cluster_bits);

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (next_svcn == evcn1) {
		/* Normal way. Update attribute and exit. */
		goto ok;
	}

	/* Add new segment [next_svcn : evcn1 - next_svcn). */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);
		if (err)
			goto out;
		/* Layout of records is changed. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
				      0, NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr = attr_b;
		le = le_b;
		mi = mi_b;
		goto repack;
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				err = -EINVAL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;
	}

ok:
	run_truncate_around(run, vcn);
out:
	up_write(&ni->file.run_lock);

	return err;
}
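/*
 * Usage sketch: map one cluster at @vcn, allocating it if it is sparse:
 *
 *	CLST lcn, len;
 *	bool new = false;
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
 *
 * With @new == NULL the call is a pure lookup and never allocates, which
 * is how read paths can probe the mapping cheaply.
 */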
int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = (u64)page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}
int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = (u64)page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}
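/*
 * Both resident helpers above return the special code E_NTFS_NONRESIDENT
 * when the DATA attribute is non-resident, so callers can detect that
 * case and fall back to the normal run-based I/O path.
 */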
/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);
	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;

	return 0;
}
/*
 * attr_load_runs_range - Load runs for given range [from to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn = from >> cluster_bits;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must be success. */
		}
	}

	return 0;
}
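/*
 * Example (illustrative, 4K clusters): attr_load_runs_range(ni,
 * ATTR_DATA, NULL, 0, run, 0x1000, 0x5000) walks vcn 1..4; for each vcn
 * still missing from @run it calls attr_load_runs_vcn() and resets clen
 * to 0, so the same vcn is re-checked on the next loop iteration.
 */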
#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err = 0;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	addr = page_address(page);

	voff = vbo[1] & (PAGE_SIZE - 1);
	vbo[0] = vbo[1] - bytes_per_off;

	for (i = 0; i < 2; i++) {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	}

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif
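/*
 * Worked example (illustrative): for a file below 4G the WOF stream
 * starts with a __le32 offset table, so frame 3 reads the pair at byte
 * offsets 8 and 12: off[0] = table[2], off[1] = table[3]. The frame then
 * occupies [*vbo_data + off[0], *vbo_data + off[1]) on disk, giving
 * *ondisk_size = off[1] - off[0].
 */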
/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparsed frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed 'cause
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data_clusters + sparse_clusters =
				 * not enough for frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There is no sparsed clusters in this frame
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
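/*
 * Example (illustrative): with c_unit == 4 a frame spans 16 clusters.
 * If the run shows 10 data clusters followed by 6 sparse clusters, then
 * *clst_data + slen == 16 >= clst_frame with slen != 0, so the frame is
 * compressed and its on-disk payload is the first 10 clusters.
 */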
/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, lcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;

		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL, 0,
					     &alen, 0, &lcn);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (next_svcn == evcn1) {
		/* Normal way. Update attribute and exit. */
		goto ok;
	}

	/* Add new segment [next_svcn : evcn1 - next_svcn). */
	if (!ni->attr_list.size) {
		err = ni_create_attr_list(ni);
		if (err)
			goto out;
		/* Layout of records is changed. */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
				      0, NULL, &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}

		attr = attr_b;
		le = le_b;
		mi = mi_b;
		goto repack;
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				err = -EINVAL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi);
		if (err)
			goto out;
	}

ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
		mi_b->dirty = true;
	}

	return err;
}
/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to collapse only cluster aligned ranges. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCN. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi);
				if (err)
					goto out;

				/* Layout of records maybe changed. */
				attr_b = NULL;
				le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
						&next_svcn);
				if (!le) {
					err = -EINVAL;
					goto out;
				}
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}
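/*
 * Example (illustrative): with 4K clusters on a plain (non-ext) file,
 * mask == 0xFFF, so attr_collapse_range(ni, 0x3000, 0x2000) frees
 * clusters 3 and 4, shifts every later VCN down by 2, and reduces
 * data_size and alloc_size by 0x2000. Unaligned @vbo or @bytes are
 * rejected with -EINVAL.
 */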
/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, dealloc;
	u64 total_size, alloc_size;
	u32 mask;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, dealloc2;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto out;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		dealloc2 = dealloc;
		err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
		if (err)
			goto out;

		if (dealloc2 == dealloc) {
			/* Looks like the required range is already sparsed. */
		} else {
			if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
					   false)) {
				err = -ENOMEM;
				goto out;
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
			if (err)
				goto out;
		}
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	total_size -= (u64)dealloc << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		make_bad_inode(&ni->vfs_inode);

	return err;
}