fs/ntfs3: Refactoring attr_set_size to restore after errors
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7 */
8
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16
17 /*
18 * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
19 * the preallocation algorithm.
20 */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33
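/*
 * get_pre_allocated - Round @size up to a power-of-two "clump" boundary.
 *
 * Sizes up to 16M round up to a 64K boundary, sizes of 16G and above to a
 * 64M boundary. In between, the shift is derived from the size itself,
 * e.g. for exactly 1G: size >> 24 == 64, __ffs(64) == 6, so
 * align_shift == 21 and the size rounds up to a 2M boundary.
 */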
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 u32 clump;
37 u8 align_shift;
38 u64 ret;
39
40 if (size <= NTFS_CLUMP_MIN) {
41 clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 } else if (size >= NTFS_CLUMP_MAX) {
44 clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 } else {
47 align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 clump = 1u << align_shift;
50 }
51
52 ret = (((size + clump - 1) >> align_shift)) << align_shift;
53
54 return ret;
55 }
56
57 /*
58 * attr_must_be_resident
59 *
60 * Return: True if attribute must be resident.
61 */
62 static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
63 enum ATTR_TYPE type)
64 {
65 const struct ATTR_DEF_ENTRY *de;
66
67 switch (type) {
68 case ATTR_STD:
69 case ATTR_NAME:
70 case ATTR_ID:
71 case ATTR_LABEL:
72 case ATTR_VOL_INFO:
73 case ATTR_ROOT:
74 case ATTR_EA_INFO:
75 return true;
76 default:
77 de = ntfs_query_def(sbi, type);
78 if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
79 return true;
80 return false;
81 }
82 }
83
84 /*
85 * attr_load_runs - Load all runs stored in @attr.
86 */
87 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
88 struct runs_tree *run, const CLST *vcn)
89 {
90 int err;
91 CLST svcn = le64_to_cpu(attr->nres.svcn);
92 CLST evcn = le64_to_cpu(attr->nres.evcn);
93 u32 asize;
94 u16 run_off;
95
96 if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
97 return 0;
98
99 if (vcn && (evcn < *vcn || *vcn < svcn))
100 return -EINVAL;
101
102 asize = le32_to_cpu(attr->size);
103 run_off = le16_to_cpu(attr->nres.run_off);
104 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
105 vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
106 asize - run_off);
107 if (err < 0)
108 return err;
109
110 return 0;
111 }
112
113 /*
114 * run_deallocate_ex - Deallocate clusters.
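 *
 * Marks the real (non-sparse) clusters of [vcn, vcn + len) as free and, if
 * @done is not NULL, adds the number of freed clusters to *done. On a
 * broken run the tree is truncated at @vcn and -EINVAL is returned.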
115 */
116 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
117 CLST vcn, CLST len, CLST *done, bool trim)
118 {
119 int err = 0;
120 CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
121 size_t idx;
122
123 if (!len)
124 goto out;
125
126 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
127 failed:
128 run_truncate(run, vcn0);
129 err = -EINVAL;
130 goto out;
131 }
132
133 for (;;) {
134 if (clen > len)
135 clen = len;
136
137 if (!clen) {
138 err = -EINVAL;
139 goto out;
140 }
141
142 if (lcn != SPARSE_LCN) {
143 mark_as_free_ex(sbi, lcn, clen, trim);
144 dn += clen;
145 }
146
147 len -= clen;
148 if (!len)
149 break;
150
151 vcn_next = vcn + clen;
152 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
153 vcn != vcn_next) {
154 /* Save memory - don't load entire run. */
155 goto failed;
156 }
157 }
158
159 out:
160 if (done)
161 *done += dn;
162
163 return err;
164 }
165
166 /*
167 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
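 *
 * @pre_alloc extra clusters are requested in addition to @len but are
 * dropped first if the volume has no room for them. A non-zero @fr limits
 * how many new fragments may be added to @run before returning.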
168 */
169 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
170 CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
171 enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
172 CLST *new_lcn)
173 {
174 int err;
175 CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
176 size_t cnt = run->count;
177
178 for (;;) {
179 err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
180 opt);
181
182 if (err == -ENOSPC && pre) {
183 pre = 0;
184 if (*pre_alloc)
185 *pre_alloc = 0;
186 continue;
187 }
188
189 if (err)
190 goto out;
191
192 if (new_lcn && vcn == vcn0)
193 *new_lcn = lcn;
194
195 /* Add new fragment into run storage. */
196 if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
197 /* Undo last 'ntfs_look_for_free_space' */
198 mark_as_free_ex(sbi, lcn, len, false);
199 err = -ENOMEM;
200 goto out;
201 }
202
203 vcn += flen;
204
205 if (flen >= len || opt == ALLOCATE_MFT ||
206 (fr && run->count - cnt >= fr)) {
207 *alen = vcn - vcn0;
208 return 0;
209 }
210
211 len -= flen;
212 }
213
214 out:
215 /* Undo 'ntfs_look_for_free_space' */
216 if (vcn - vcn0) {
217 run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
218 run_truncate(run, vcn0);
219 }
220
221 return err;
222 }
223
224 /*
225 * attr_make_nonresident
226 *
227 * If @page is not NULL, it already contains resident data
228 * and is locked (called from ni_write_frame()).
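 *
 * On failure the newly allocated clusters are released and the original
 * resident attribute is restored in the record.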
229 */
230 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
231 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
232 u64 new_size, struct runs_tree *run,
233 struct ATTRIB **ins_attr, struct page *page)
234 {
235 struct ntfs_sb_info *sbi;
236 struct ATTRIB *attr_s;
237 struct MFT_REC *rec;
238 u32 used, asize, rsize, aoff, align;
239 bool is_data;
240 CLST len, alen;
241 char *next;
242 int err;
243
244 if (attr->non_res) {
245 *ins_attr = attr;
246 return 0;
247 }
248
249 sbi = mi->sbi;
250 rec = mi->mrec;
251 attr_s = NULL;
252 used = le32_to_cpu(rec->used);
253 asize = le32_to_cpu(attr->size);
254 next = Add2Ptr(attr, asize);
255 aoff = PtrOffset(rec, attr);
256 rsize = le32_to_cpu(attr->res.data_size);
257 is_data = attr->type == ATTR_DATA && !attr->name_len;
258
259 align = sbi->cluster_size;
260 if (is_attr_compressed(attr))
261 align <<= COMPRESSION_UNIT;
262 len = (rsize + align - 1) >> sbi->cluster_bits;
263
264 run_init(run);
265
266 /* Make a copy of original attribute. */
267 attr_s = kmemdup(attr, asize, GFP_NOFS);
268 if (!attr_s) {
269 err = -ENOMEM;
270 goto out;
271 }
272
273 if (!len) {
274 /* Empty resident -> Empty nonresident. */
275 alen = 0;
276 } else {
277 const char *data = resident_data(attr);
278
279 err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
280 ALLOCATE_DEF, &alen, 0, NULL);
281 if (err)
282 goto out1;
283
284 if (!rsize) {
285 /* Empty resident -> Non empty nonresident. */
286 } else if (!is_data) {
287 err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
288 if (err)
289 goto out2;
290 } else if (!page) {
291 char *kaddr;
292
293 page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
294 if (!page) {
295 err = -ENOMEM;
296 goto out2;
297 }
298 kaddr = kmap_atomic(page);
299 memcpy(kaddr, data, rsize);
300 memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
301 kunmap_atomic(kaddr);
302 flush_dcache_page(page);
303 SetPageUptodate(page);
304 set_page_dirty(page);
305 unlock_page(page);
306 put_page(page);
307 }
308 }
309
310 /* Remove original attribute. */
311 used -= asize;
312 memmove(attr, Add2Ptr(attr, asize), used - aoff);
313 rec->used = cpu_to_le32(used);
314 mi->dirty = true;
315 if (le)
316 al_remove_le(ni, le);
317
318 err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
319 attr_s->name_len, run, 0, alen,
320 attr_s->flags, &attr, NULL, NULL);
321 if (err)
322 goto out3;
323
324 kfree(attr_s);
325 attr->nres.data_size = cpu_to_le64(rsize);
326 attr->nres.valid_size = attr->nres.data_size;
327
328 *ins_attr = attr;
329
330 if (is_data)
331 ni->ni_flags &= ~NI_FLAG_RESIDENT;
332
333 /* Resident attribute becomes nonresident. */
334 return 0;
335
336 out3:
337 attr = Add2Ptr(rec, aoff);
338 memmove(next, attr, used - aoff);
339 memcpy(attr, attr_s, asize);
340 rec->used = cpu_to_le32(used + asize);
341 mi->dirty = true;
342 out2:
343 /* Undo: do not trim newly allocated clusters. */
344 run_deallocate(sbi, run, false);
345 run_close(run);
346 out1:
347 kfree(attr_s);
348 out:
349 return err;
350 }
351
352 /*
353 * attr_set_size_res - Helper for attr_set_size().
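 *
 * Resizes a resident attribute in place. If the record cannot hold the
 * new size, the attribute is converted via attr_make_nonresident().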
354 */
355 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
356 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
357 u64 new_size, struct runs_tree *run,
358 struct ATTRIB **ins_attr)
359 {
360 struct ntfs_sb_info *sbi = mi->sbi;
361 struct MFT_REC *rec = mi->mrec;
362 u32 used = le32_to_cpu(rec->used);
363 u32 asize = le32_to_cpu(attr->size);
364 u32 aoff = PtrOffset(rec, attr);
365 u32 rsize = le32_to_cpu(attr->res.data_size);
366 u32 tail = used - aoff - asize;
367 char *next = Add2Ptr(attr, asize);
368 s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
369
370 if (dsize < 0) {
371 memmove(next + dsize, next, tail);
372 } else if (dsize > 0) {
373 if (used + dsize > sbi->max_bytes_per_attr)
374 return attr_make_nonresident(ni, attr, le, mi, new_size,
375 run, ins_attr, NULL);
376
377 memmove(next + dsize, next, tail);
378 memset(next, 0, dsize);
379 }
380
381 if (new_size > rsize)
382 memset(Add2Ptr(resident_data(attr), rsize), 0,
383 new_size - rsize);
384
385 rec->used = cpu_to_le32(used + dsize);
386 attr->size = cpu_to_le32(asize + dsize);
387 attr->res.data_size = cpu_to_le32(new_size);
388 mi->dirty = true;
389 *ins_attr = attr;
390
391 return 0;
392 }
393
394 /*
395 * attr_set_size - Change the size of an attribute.
396 *
397 * Extend:
398 * - Sparse/compressed: No allocated clusters.
399 * - Normal: Append allocated and preallocated new clusters.
400 * Shrink:
401 * - No deallocation if @keep_prealloc is set.
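 *
 * On errors while extending, the function tries to roll back to the last
 * consistent state (see the 'undo_1'/'undo_2' paths below).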
402 */
403 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
404 const __le16 *name, u8 name_len, struct runs_tree *run,
405 u64 new_size, const u64 *new_valid, bool keep_prealloc,
406 struct ATTRIB **ret)
407 {
408 int err = 0;
409 struct ntfs_sb_info *sbi = ni->mi.sbi;
410 u8 cluster_bits = sbi->cluster_bits;
411 bool is_mft =
412 ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
413 u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
414 struct ATTRIB *attr = NULL, *attr_b;
415 struct ATTR_LIST_ENTRY *le, *le_b;
416 struct mft_inode *mi, *mi_b;
417 CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
418 CLST next_svcn, pre_alloc = -1, done = 0;
419 bool is_ext, is_bad = false;
420 u32 align;
421 struct MFT_REC *rec;
422
423 again:
424 alen = 0;
425 le_b = NULL;
426 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
427 &mi_b);
428 if (!attr_b) {
429 err = -ENOENT;
430 goto bad_inode;
431 }
432
433 if (!attr_b->non_res) {
434 err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
435 &attr_b);
436 if (err)
437 return err;
438
439 /* Return if file is still resident. */
440 if (!attr_b->non_res)
441 goto ok1;
442
443 /* Layout of records may be changed, so do a full search. */
444 goto again;
445 }
446
447 is_ext = is_attr_ext(attr_b);
448 align = sbi->cluster_size;
449 if (is_ext)
450 align <<= attr_b->nres.c_unit;
451
452 old_valid = le64_to_cpu(attr_b->nres.valid_size);
453 old_size = le64_to_cpu(attr_b->nres.data_size);
454 old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
455
456 again_1:
457 old_alen = old_alloc >> cluster_bits;
458
459 new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
460 new_alen = new_alloc >> cluster_bits;
461
462 if (keep_prealloc && new_size < old_size) {
463 attr_b->nres.data_size = cpu_to_le64(new_size);
464 mi_b->dirty = true;
465 goto ok;
466 }
467
468 vcn = old_alen - 1;
469
470 svcn = le64_to_cpu(attr_b->nres.svcn);
471 evcn = le64_to_cpu(attr_b->nres.evcn);
472
473 if (svcn <= vcn && vcn <= evcn) {
474 attr = attr_b;
475 le = le_b;
476 mi = mi_b;
477 } else if (!le_b) {
478 err = -EINVAL;
479 goto bad_inode;
480 } else {
481 le = le_b;
482 attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
483 &mi);
484 if (!attr) {
485 err = -EINVAL;
486 goto bad_inode;
487 }
488
489 next_le_1:
490 svcn = le64_to_cpu(attr->nres.svcn);
491 evcn = le64_to_cpu(attr->nres.evcn);
492 }
493 /*
494 * Here we have:
495 * attr,mi,le - last attribute segment (containing 'vcn').
496 * attr_b,mi_b,le_b - base (primary) attribute segment.
497 */
498 next_le:
499 rec = mi->mrec;
500 err = attr_load_runs(attr, ni, run, NULL);
501 if (err)
502 goto out;
503
504 if (new_size > old_size) {
505 CLST to_allocate;
506 size_t free;
507
508 if (new_alloc <= old_alloc) {
509 attr_b->nres.data_size = cpu_to_le64(new_size);
510 mi_b->dirty = true;
511 goto ok;
512 }
513
514 /*
515 * Add clusters. In simple case we have to:
516 * - allocate space (vcn, lcn, len)
517 * - update packed run in 'mi'
518 * - update attr->nres.evcn
519 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
520 */
521 to_allocate = new_alen - old_alen;
522 add_alloc_in_same_attr_seg:
523 lcn = 0;
524 if (is_mft) {
525 /* MFT allocates clusters from MFT zone. */
526 pre_alloc = 0;
527 } else if (is_ext) {
528 /* No preallocation for sparse/compressed. */
529 pre_alloc = 0;
530 } else if (pre_alloc == -1) {
531 pre_alloc = 0;
532 if (type == ATTR_DATA && !name_len &&
533 sbi->options->prealloc) {
534 pre_alloc =
535 bytes_to_cluster(
536 sbi,
537 get_pre_allocated(new_size)) -
538 new_alen;
539 }
540
541 /* Get the last LCN to allocate from. */
542 if (old_alen &&
543 !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
544 lcn = SPARSE_LCN;
545 }
546
547 if (lcn == SPARSE_LCN)
548 lcn = 0;
549 else if (lcn)
550 lcn += 1;
551
552 free = wnd_zeroes(&sbi->used.bitmap);
553 if (to_allocate > free) {
554 err = -ENOSPC;
555 goto out;
556 }
557
558 if (pre_alloc && to_allocate + pre_alloc > free)
559 pre_alloc = 0;
560 }
561
562 vcn = old_alen;
563
564 if (is_ext) {
565 if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
566 false)) {
567 err = -ENOMEM;
568 goto out;
569 }
570 alen = to_allocate;
571 } else {
572 /* ~3 bytes per fragment. */
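/*
 * Limit new fragments so the packed run still fits
 * in the free space of this MFT record.
 */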
573 err = attr_allocate_clusters(
574 sbi, run, vcn, lcn, to_allocate, &pre_alloc,
575 is_mft ? ALLOCATE_MFT : 0, &alen,
576 is_mft ? 0
577 : (sbi->record_size -
578 le32_to_cpu(rec->used) + 8) /
579 3 +
580 1,
581 NULL);
582 if (err)
583 goto out;
584 }
585
586 done += alen;
587 vcn += alen;
588 if (to_allocate > alen)
589 to_allocate -= alen;
590 else
591 to_allocate = 0;
592
593 pack_runs:
594 err = mi_pack_runs(mi, attr, run, vcn - svcn);
595 if (err)
596 goto undo_1;
597
598 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
599 new_alloc_tmp = (u64)next_svcn << cluster_bits;
600 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
601 mi_b->dirty = true;
602
603 if (next_svcn >= vcn && !to_allocate) {
604 /* Normal way. Update attribute and exit. */
605 attr_b->nres.data_size = cpu_to_le64(new_size);
606 goto ok;
607 }
608
609 /* At least two MFT records to avoid a recursive loop. */
610 if (is_mft && next_svcn == vcn &&
611 ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
612 new_size = new_alloc_tmp;
613 attr_b->nres.data_size = attr_b->nres.alloc_size;
614 goto ok;
615 }
616
617 if (le32_to_cpu(rec->used) < sbi->record_size) {
618 old_alen = next_svcn;
619 evcn = old_alen - 1;
620 goto add_alloc_in_same_attr_seg;
621 }
622
623 attr_b->nres.data_size = attr_b->nres.alloc_size;
624 if (new_alloc_tmp < old_valid)
625 attr_b->nres.valid_size = attr_b->nres.data_size;
626
627 if (type == ATTR_LIST) {
628 err = ni_expand_list(ni);
629 if (err)
630 goto undo_2;
631 if (next_svcn < vcn)
632 goto pack_runs;
633
634 /* Layout of records is changed. */
635 goto again;
636 }
637
638 if (!ni->attr_list.size) {
639 err = ni_create_attr_list(ni);
640 /* In case of error layout of records is not changed. */
641 if (err)
642 goto undo_2;
643 /* Layout of records is changed. */
644 }
645
646 if (next_svcn >= vcn) {
647 /* This is MFT data, repeat. */
648 goto again;
649 }
650
651 /* Insert new attribute segment. */
652 err = ni_insert_nonresident(ni, type, name, name_len, run,
653 next_svcn, vcn - next_svcn,
654 attr_b->flags, &attr, &mi, NULL);
655
656 /*
657 * Layout of records may be changed.
658 * Find base attribute to update.
659 */
660 le_b = NULL;
661 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
662 NULL, &mi_b);
663 if (!attr_b) {
664 err = -EINVAL;
665 goto bad_inode;
666 }
667
668 if (err) {
669 /* ni_insert_nonresident failed. */
670 attr = NULL;
671 goto undo_2;
672 }
673
674 if (!is_mft)
675 run_truncate_head(run, evcn + 1);
676
677 svcn = le64_to_cpu(attr->nres.svcn);
678 evcn = le64_to_cpu(attr->nres.evcn);
679
680 /*
681 * Attribute is in a consistent state.
682 * Save this point to restore to if next steps fail.
683 */
684 old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
685 attr_b->nres.valid_size = attr_b->nres.data_size =
686 attr_b->nres.alloc_size = cpu_to_le64(old_size);
687 mi_b->dirty = true;
688 goto again_1;
689 }
690
691 if (new_size != old_size ||
692 (new_alloc != old_alloc && !keep_prealloc)) {
693 /*
694 * Truncate clusters. In simple case we have to:
695 * - update packed run in 'mi'
696 * - update attr->nres.evcn
697 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
698 * - mark and trim clusters as free (vcn, lcn, len)
699 */
700 CLST dlen = 0;
701
702 vcn = max(svcn, new_alen);
703 new_alloc_tmp = (u64)vcn << cluster_bits;
704
705 if (vcn > svcn) {
706 err = mi_pack_runs(mi, attr, run, vcn - svcn);
707 if (err)
708 goto out;
709 } else if (le && le->vcn) {
710 u16 le_sz = le16_to_cpu(le->size);
711
712 /*
713 * NOTE: List entries for one attribute are always
714 * the same size. We deal with the last entry (vcn==0)
715 * and it is not the first in the entries array
716 * (the list entry for the std attribute is always first),
717 * so it is safe to step back.
718 */
719 mi_remove_attr(NULL, mi, attr);
720
721 if (!al_remove_le(ni, le)) {
722 err = -EINVAL;
723 goto bad_inode;
724 }
725
726 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
727 } else {
728 attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
729 mi->dirty = true;
730 }
731
732 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
733
734 if (vcn == new_alen) {
735 attr_b->nres.data_size = cpu_to_le64(new_size);
736 if (new_size < old_valid)
737 attr_b->nres.valid_size =
738 attr_b->nres.data_size;
739 } else {
740 if (new_alloc_tmp <=
741 le64_to_cpu(attr_b->nres.data_size))
742 attr_b->nres.data_size =
743 attr_b->nres.alloc_size;
744 if (new_alloc_tmp <
745 le64_to_cpu(attr_b->nres.valid_size))
746 attr_b->nres.valid_size =
747 attr_b->nres.alloc_size;
748 }
749 mi_b->dirty = true;
750
751 err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
752 true);
753 if (err)
754 goto out;
755
756 if (is_ext) {
757 /* dlen - really deallocated clusters. */
758 le64_sub_cpu(&attr_b->nres.total_size,
759 ((u64)dlen << cluster_bits));
760 }
761
762 run_truncate(run, vcn);
763
764 if (new_alloc_tmp <= new_alloc)
765 goto ok;
766
767 old_size = new_alloc_tmp;
768 vcn = svcn - 1;
769
770 if (le == le_b) {
771 attr = attr_b;
772 mi = mi_b;
773 evcn = svcn - 1;
774 svcn = 0;
775 goto next_le;
776 }
777
778 if (le->type != type || le->name_len != name_len ||
779 memcmp(le_name(le), name, name_len * sizeof(short))) {
780 err = -EINVAL;
781 goto bad_inode;
782 }
783
784 err = ni_load_mi(ni, le, &mi);
785 if (err)
786 goto out;
787
788 attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
789 if (!attr) {
790 err = -EINVAL;
791 goto bad_inode;
792 }
793 goto next_le_1;
794 }
795
796 ok:
797 if (new_valid) {
798 __le64 valid = cpu_to_le64(min(*new_valid, new_size));
799
800 if (attr_b->nres.valid_size != valid) {
801 attr_b->nres.valid_size = valid;
802 mi_b->dirty = true;
803 }
804 }
805
806 ok1:
807 if (ret)
808 *ret = attr_b;
809
810 /* Update inode_set_bytes. */
811 if (((type == ATTR_DATA && !name_len) ||
812 (type == ATTR_ALLOC && name == I30_NAME))) {
813 bool dirty = false;
814
815 if (ni->vfs_inode.i_size != new_size) {
816 ni->vfs_inode.i_size = new_size;
817 dirty = true;
818 }
819
820 if (attr_b->non_res) {
821 new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
822 if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
823 inode_set_bytes(&ni->vfs_inode, new_alloc);
824 dirty = true;
825 }
826 }
827
828 if (dirty) {
829 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
830 mark_inode_dirty(&ni->vfs_inode);
831 }
832 }
833
834 return 0;
835
836 undo_2:
837 vcn -= alen;
838 attr_b->nres.data_size = cpu_to_le64(old_size);
839 attr_b->nres.valid_size = cpu_to_le64(old_valid);
840 attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
841
842 /* Restore 'attr' and 'mi'. */
843 if (attr)
844 goto restore_run;
845
846 if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
847 svcn <= le64_to_cpu(attr_b->nres.evcn)) {
848 attr = attr_b;
849 le = le_b;
850 mi = mi_b;
851 } else if (!le_b) {
852 err = -EINVAL;
853 goto bad_inode;
854 } else {
855 le = le_b;
856 attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
857 &svcn, &mi);
858 if (!attr)
859 goto bad_inode;
860 }
861
862 restore_run:
863 if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
864 is_bad = true;
865
866 undo_1:
867 run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
868
869 run_truncate(run, vcn);
870 out:
871 if (is_bad) {
872 bad_inode:
873 _ntfs_bad_inode(&ni->vfs_inode);
874 }
875 return err;
876 }
877
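/*
 * attr_data_get_block - Map @vcn of the unnamed data attribute to @lcn / @len.
 *
 * If @new is not NULL, sparse or unallocated clusters are allocated and
 * *new is set to true when an allocation actually happened. For resident
 * data *lcn is set to RESIDENT_LCN.
 */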
878 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
879 CLST *len, bool *new)
880 {
881 int err = 0;
882 struct runs_tree *run = &ni->file.run;
883 struct ntfs_sb_info *sbi;
884 u8 cluster_bits;
885 struct ATTRIB *attr = NULL, *attr_b;
886 struct ATTR_LIST_ENTRY *le, *le_b;
887 struct mft_inode *mi, *mi_b;
888 CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
889 u64 total_size;
890 u32 clst_per_frame;
891 bool ok;
892
893 if (new)
894 *new = false;
895
896 down_read(&ni->file.run_lock);
897 ok = run_lookup_entry(run, vcn, lcn, len, NULL);
898 up_read(&ni->file.run_lock);
899
900 if (ok && (*lcn != SPARSE_LCN || !new)) {
901 /* Normal way. */
902 return 0;
903 }
904
905 if (!clen)
906 clen = 1;
907
908 if (ok && clen > *len)
909 clen = *len;
910
911 sbi = ni->mi.sbi;
912 cluster_bits = sbi->cluster_bits;
913
914 ni_lock(ni);
915 down_write(&ni->file.run_lock);
916
917 le_b = NULL;
918 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
919 if (!attr_b) {
920 err = -ENOENT;
921 goto out;
922 }
923
924 if (!attr_b->non_res) {
925 *lcn = RESIDENT_LCN;
926 *len = 1;
927 goto out;
928 }
929
930 asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
931 if (vcn >= asize) {
932 err = -EINVAL;
933 goto out;
934 }
935
936 clst_per_frame = 1u << attr_b->nres.c_unit;
937 to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
938
939 if (vcn + to_alloc > asize)
940 to_alloc = asize - vcn;
941
942 svcn = le64_to_cpu(attr_b->nres.svcn);
943 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
944
945 attr = attr_b;
946 le = le_b;
947 mi = mi_b;
948
949 if (le_b && (vcn < svcn || evcn1 <= vcn)) {
950 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
951 &mi);
952 if (!attr) {
953 err = -EINVAL;
954 goto out;
955 }
956 svcn = le64_to_cpu(attr->nres.svcn);
957 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
958 }
959
960 err = attr_load_runs(attr, ni, run, NULL);
961 if (err)
962 goto out;
963
964 if (!ok) {
965 ok = run_lookup_entry(run, vcn, lcn, len, NULL);
966 if (ok && (*lcn != SPARSE_LCN || !new)) {
967 /* Normal way. */
968 err = 0;
969 goto ok;
970 }
971
972 if (!ok && !new) {
973 *len = 0;
974 err = 0;
975 goto ok;
976 }
977
978 if (ok && clen > *len) {
979 clen = *len;
980 to_alloc = (clen + clst_per_frame - 1) &
981 ~(clst_per_frame - 1);
982 }
983 }
984
985 if (!is_attr_ext(attr_b)) {
986 err = -EINVAL;
987 goto out;
988 }
989
990 /* Get the last LCN to allocate from. */
991 hint = 0;
992
993 if (vcn > evcn1) {
994 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
995 false)) {
996 err = -ENOMEM;
997 goto out;
998 }
999 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1000 hint = -1;
1001 }
1002
1003 err = attr_allocate_clusters(
1004 sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
1005 (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
1006 lcn);
1007 if (err)
1008 goto out;
1009 *new = true;
1010
1011 end = vcn + *len;
1012
1013 total_size = le64_to_cpu(attr_b->nres.total_size) +
1014 ((u64)*len << cluster_bits);
1015
1016 repack:
1017 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1018 if (err)
1019 goto out;
1020
1021 attr_b->nres.total_size = cpu_to_le64(total_size);
1022 inode_set_bytes(&ni->vfs_inode, total_size);
1023 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1024
1025 mi_b->dirty = true;
1026 mark_inode_dirty(&ni->vfs_inode);
1027
1028 /* Stored [vcn : next_svcn) from [vcn : end). */
1029 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1030
1031 if (end <= evcn1) {
1032 if (next_svcn == evcn1) {
1033 /* Normal way. Update attribute and exit. */
1034 goto ok;
1035 }
1036 /* Add new segment [next_svcn : evcn1). */
1037 if (!ni->attr_list.size) {
1038 err = ni_create_attr_list(ni);
1039 if (err)
1040 goto out;
1041 /* Layout of records is changed. */
1042 le_b = NULL;
1043 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1044 0, NULL, &mi_b);
1045 if (!attr_b) {
1046 err = -ENOENT;
1047 goto out;
1048 }
1049
1050 attr = attr_b;
1051 le = le_b;
1052 mi = mi_b;
1053 goto repack;
1054 }
1055 }
1056
1057 svcn = evcn1;
1058
1059 /* Estimate next attribute. */
1060 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1061
1062 if (attr) {
1063 CLST alloc = bytes_to_cluster(
1064 sbi, le64_to_cpu(attr_b->nres.alloc_size));
1065 CLST evcn = le64_to_cpu(attr->nres.evcn);
1066
1067 if (end < next_svcn)
1068 end = next_svcn;
1069 while (end > evcn) {
1070 /* Remove segment [svcn : evcn). */
1071 mi_remove_attr(NULL, mi, attr);
1072
1073 if (!al_remove_le(ni, le)) {
1074 err = -EINVAL;
1075 goto out;
1076 }
1077
1078 if (evcn + 1 >= alloc) {
1079 /* Last attribute segment. */
1080 evcn1 = evcn + 1;
1081 goto ins_ext;
1082 }
1083
1084 if (ni_load_mi(ni, le, &mi)) {
1085 attr = NULL;
1086 goto out;
1087 }
1088
1089 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1090 &le->id);
1091 if (!attr) {
1092 err = -EINVAL;
1093 goto out;
1094 }
1095 svcn = le64_to_cpu(attr->nres.svcn);
1096 evcn = le64_to_cpu(attr->nres.evcn);
1097 }
1098
1099 if (end < svcn)
1100 end = svcn;
1101
1102 err = attr_load_runs(attr, ni, run, &end);
1103 if (err)
1104 goto out;
1105
1106 evcn1 = evcn + 1;
1107 attr->nres.svcn = cpu_to_le64(next_svcn);
1108 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1109 if (err)
1110 goto out;
1111
1112 le->vcn = cpu_to_le64(next_svcn);
1113 ni->attr_list.dirty = true;
1114 mi->dirty = true;
1115
1116 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1117 }
1118 ins_ext:
1119 if (evcn1 > next_svcn) {
1120 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1121 next_svcn, evcn1 - next_svcn,
1122 attr_b->flags, &attr, &mi, NULL);
1123 if (err)
1124 goto out;
1125 }
1126 ok:
1127 run_truncate_around(run, vcn);
1128 out:
1129 up_write(&ni->file.run_lock);
1130 ni_unlock(ni);
1131
1132 return err;
1133 }
1134
1135 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1136 {
1137 u64 vbo;
1138 struct ATTRIB *attr;
1139 u32 data_size;
1140
1141 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1142 if (!attr)
1143 return -EINVAL;
1144
1145 if (attr->non_res)
1146 return E_NTFS_NONRESIDENT;
1147
1148 vbo = page->index << PAGE_SHIFT;
1149 data_size = le32_to_cpu(attr->res.data_size);
1150 if (vbo < data_size) {
1151 const char *data = resident_data(attr);
1152 char *kaddr = kmap_atomic(page);
1153 u32 use = data_size - vbo;
1154
1155 if (use > PAGE_SIZE)
1156 use = PAGE_SIZE;
1157
1158 memcpy(kaddr, data + vbo, use);
1159 memset(kaddr + use, 0, PAGE_SIZE - use);
1160 kunmap_atomic(kaddr);
1161 flush_dcache_page(page);
1162 SetPageUptodate(page);
1163 } else if (!PageUptodate(page)) {
1164 zero_user_segment(page, 0, PAGE_SIZE);
1165 SetPageUptodate(page);
1166 }
1167
1168 return 0;
1169 }
1170
1171 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1172 {
1173 u64 vbo;
1174 struct mft_inode *mi;
1175 struct ATTRIB *attr;
1176 u32 data_size;
1177
1178 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1179 if (!attr)
1180 return -EINVAL;
1181
1182 if (attr->non_res) {
1183 /* Return special error code to check this case. */
1184 return E_NTFS_NONRESIDENT;
1185 }
1186
1187 vbo = page->index << PAGE_SHIFT;
1188 data_size = le32_to_cpu(attr->res.data_size);
1189 if (vbo < data_size) {
1190 char *data = resident_data(attr);
1191 char *kaddr = kmap_atomic(page);
1192 u32 use = data_size - vbo;
1193
1194 if (use > PAGE_SIZE)
1195 use = PAGE_SIZE;
1196 memcpy(data + vbo, kaddr, use);
1197 kunmap_atomic(kaddr);
1198 mi->dirty = true;
1199 }
1200 ni->i_valid = data_size;
1201
1202 return 0;
1203 }
1204
1205 /*
1206 * attr_load_runs_vcn - Load runs with VCN.
1207 */
1208 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1209 const __le16 *name, u8 name_len, struct runs_tree *run,
1210 CLST vcn)
1211 {
1212 struct ATTRIB *attr;
1213 int err;
1214 CLST svcn, evcn;
1215 u16 ro;
1216
1217 attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1218 if (!attr) {
1219 /* Is record corrupted? */
1220 return -ENOENT;
1221 }
1222
1223 svcn = le64_to_cpu(attr->nres.svcn);
1224 evcn = le64_to_cpu(attr->nres.evcn);
1225
1226 if (evcn < vcn || vcn < svcn) {
1227 /* Is record corrupted? */
1228 return -EINVAL;
1229 }
1230
1231 ro = le16_to_cpu(attr->nres.run_off);
1232 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1233 Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1234 if (err < 0)
1235 return err;
1236 return 0;
1237 }
1238
1239 /*
1240 * attr_load_runs_range - Load runs for given range [from to).
1241 */
1242 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1243 const __le16 *name, u8 name_len, struct runs_tree *run,
1244 u64 from, u64 to)
1245 {
1246 struct ntfs_sb_info *sbi = ni->mi.sbi;
1247 u8 cluster_bits = sbi->cluster_bits;
1248 CLST vcn;
1249 CLST vcn_last = (to - 1) >> cluster_bits;
1250 CLST lcn, clen;
1251 int err;
1252
1253 for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1254 if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1255 err = attr_load_runs_vcn(ni, type, name, name_len, run,
1256 vcn);
1257 if (err)
1258 return err;
1259 clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1260 }
1261 }
1262
1263 return 0;
1264 }
1265
1266 #ifdef CONFIG_NTFS3_LZX_XPRESS
1267 /*
1268 * attr_wof_frame_info
1269 *
1270 * Read header of Xpress/LZX file to get info about frame.
1271 */
1272 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1273 struct runs_tree *run, u64 frame, u64 frames,
1274 u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1275 {
1276 struct ntfs_sb_info *sbi = ni->mi.sbi;
1277 u64 vbo[2], off[2], wof_size;
1278 u32 voff;
1279 u8 bytes_per_off;
1280 char *addr;
1281 struct page *page;
1282 int i, err;
1283 __le32 *off32;
1284 __le64 *off64;
1285
1286 if (ni->vfs_inode.i_size < 0x100000000ull) {
1287 /* File starts with array of 32 bit offsets. */
1288 bytes_per_off = sizeof(__le32);
1289 vbo[1] = frame << 2;
1290 *vbo_data = frames << 2;
1291 } else {
1292 /* File starts with array of 64 bit offsets. */
1293 bytes_per_off = sizeof(__le64);
1294 vbo[1] = frame << 3;
1295 *vbo_data = frames << 3;
1296 }
1297
1298 /*
1299 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1300 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1301 */
1302 if (!attr->non_res) {
1303 if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1304 ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1305 return -EINVAL;
1306 }
1307 addr = resident_data(attr);
1308
1309 if (bytes_per_off == sizeof(__le32)) {
1310 off32 = Add2Ptr(addr, vbo[1]);
1311 off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1312 off[1] = le32_to_cpu(off32[0]);
1313 } else {
1314 off64 = Add2Ptr(addr, vbo[1]);
1315 off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1316 off[1] = le64_to_cpu(off64[0]);
1317 }
1318
1319 *vbo_data += off[0];
1320 *ondisk_size = off[1] - off[0];
1321 return 0;
1322 }
1323
1324 wof_size = le64_to_cpu(attr->nres.data_size);
1325 down_write(&ni->file.run_lock);
1326 page = ni->file.offs_page;
1327 if (!page) {
1328 page = alloc_page(GFP_KERNEL);
1329 if (!page) {
1330 err = -ENOMEM;
1331 goto out;
1332 }
1333 page->index = -1;
1334 ni->file.offs_page = page;
1335 }
1336 lock_page(page);
1337 addr = page_address(page);
1338
1339 if (vbo[1]) {
1340 voff = vbo[1] & (PAGE_SIZE - 1);
1341 vbo[0] = vbo[1] - bytes_per_off;
1342 i = 0;
1343 } else {
1344 voff = 0;
1345 vbo[0] = 0;
1346 off[0] = 0;
1347 i = 1;
1348 }
1349
1350 do {
1351 pgoff_t index = vbo[i] >> PAGE_SHIFT;
1352
1353 if (index != page->index) {
1354 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1355 u64 to = min(from + PAGE_SIZE, wof_size);
1356
1357 err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1358 ARRAY_SIZE(WOF_NAME), run,
1359 from, to);
1360 if (err)
1361 goto out1;
1362
1363 err = ntfs_bio_pages(sbi, run, &page, 1, from,
1364 to - from, REQ_OP_READ);
1365 if (err) {
1366 page->index = -1;
1367 goto out1;
1368 }
1369 page->index = index;
1370 }
1371
1372 if (i) {
1373 if (bytes_per_off == sizeof(__le32)) {
1374 off32 = Add2Ptr(addr, voff);
1375 off[1] = le32_to_cpu(*off32);
1376 } else {
1377 off64 = Add2Ptr(addr, voff);
1378 off[1] = le64_to_cpu(*off64);
1379 }
1380 } else if (!voff) {
1381 if (bytes_per_off == sizeof(__le32)) {
1382 off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1383 off[0] = le32_to_cpu(*off32);
1384 } else {
1385 off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1386 off[0] = le64_to_cpu(*off64);
1387 }
1388 } else {
1389 /* Two values in one page. */
1390 if (bytes_per_off == sizeof(__le32)) {
1391 off32 = Add2Ptr(addr, voff);
1392 off[0] = le32_to_cpu(off32[-1]);
1393 off[1] = le32_to_cpu(off32[0]);
1394 } else {
1395 off64 = Add2Ptr(addr, voff);
1396 off[0] = le64_to_cpu(off64[-1]);
1397 off[1] = le64_to_cpu(off64[0]);
1398 }
1399 break;
1400 }
1401 } while (++i < 2);
1402
1403 *vbo_data += off[0];
1404 *ondisk_size = off[1] - off[0];
1405
1406 out1:
1407 unlock_page(page);
1408 out:
1409 up_write(&ni->file.run_lock);
1410 return err;
1411 }
1412 #endif
1413
1414 /*
1415 * attr_is_frame_compressed - Used to detect compressed frame.
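 *
 * On success *clst_data is the number of data (non-sparse) clusters in the
 * frame: 0 for a fully sparse frame (or a resident/uncompressed attribute),
 * the full frame size for an uncompressed frame, and less than the frame
 * size when the frame is really compressed.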
1416 */
1417 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1418 CLST frame, CLST *clst_data)
1419 {
1420 int err;
1421 u32 clst_frame;
1422 CLST clen, lcn, vcn, alen, slen, vcn_next;
1423 size_t idx;
1424 struct runs_tree *run;
1425
1426 *clst_data = 0;
1427
1428 if (!is_attr_compressed(attr))
1429 return 0;
1430
1431 if (!attr->non_res)
1432 return 0;
1433
1434 clst_frame = 1u << attr->nres.c_unit;
1435 vcn = frame * clst_frame;
1436 run = &ni->file.run;
1437
1438 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1439 err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1440 attr->name_len, run, vcn);
1441 if (err)
1442 return err;
1443
1444 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1445 return -EINVAL;
1446 }
1447
1448 if (lcn == SPARSE_LCN) {
1449 /* Sparse frame. */
1450 return 0;
1451 }
1452
1453 if (clen >= clst_frame) {
1454 /*
1455 * The frame is not compressed because
1456 * it does not contain any sparse clusters.
1457 */
1458 *clst_data = clst_frame;
1459 return 0;
1460 }
1461
1462 alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1463 slen = 0;
1464 *clst_data = clen;
1465
1466 /*
1467 * The frame is compressed if *clst_data + slen >= clst_frame.
1468 * Check next fragments.
1469 */
1470 while ((vcn += clen) < alen) {
1471 vcn_next = vcn;
1472
1473 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1474 vcn_next != vcn) {
1475 err = attr_load_runs_vcn(ni, attr->type,
1476 attr_name(attr),
1477 attr->name_len, run, vcn_next);
1478 if (err)
1479 return err;
1480 vcn = vcn_next;
1481
1482 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1483 return -EINVAL;
1484 }
1485
1486 if (lcn == SPARSE_LCN) {
1487 slen += clen;
1488 } else {
1489 if (slen) {
1490 /*
1491 * Data clusters + sparse clusters are
1492 * not enough for a full frame.
1493 */
1494 return -EINVAL;
1495 }
1496 *clst_data += clen;
1497 }
1498
1499 if (*clst_data + slen >= clst_frame) {
1500 if (!slen) {
1501 /*
1502 * There are no sparse clusters in this frame,
1503 * so it is not compressed.
1504 */
1505 *clst_data = clst_frame;
1506 } else {
1507 /* Frame is compressed. */
1508 }
1509 break;
1510 }
1511 }
1512
1513 return 0;
1514 }
1515
1516 /*
1517 * attr_allocate_frame - Allocate/free clusters for @frame.
1518 *
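 * Resizes the on-disk allocation of one compression frame so it holds
 * @compr_size bytes, allocating or freeing clusters as needed.
 *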
1519 * Assumed: down_write(&ni->file.run_lock);
1520 */
1521 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1522 u64 new_valid)
1523 {
1524 int err = 0;
1525 struct runs_tree *run = &ni->file.run;
1526 struct ntfs_sb_info *sbi = ni->mi.sbi;
1527 struct ATTRIB *attr = NULL, *attr_b;
1528 struct ATTR_LIST_ENTRY *le, *le_b;
1529 struct mft_inode *mi, *mi_b;
1530 CLST svcn, evcn1, next_svcn, lcn, len;
1531 CLST vcn, end, clst_data;
1532 u64 total_size, valid_size, data_size;
1533
1534 le_b = NULL;
1535 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1536 if (!attr_b)
1537 return -ENOENT;
1538
1539 if (!is_attr_ext(attr_b))
1540 return -EINVAL;
1541
1542 vcn = frame << NTFS_LZNT_CUNIT;
1543 total_size = le64_to_cpu(attr_b->nres.total_size);
1544
1545 svcn = le64_to_cpu(attr_b->nres.svcn);
1546 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1547 data_size = le64_to_cpu(attr_b->nres.data_size);
1548
1549 if (svcn <= vcn && vcn < evcn1) {
1550 attr = attr_b;
1551 le = le_b;
1552 mi = mi_b;
1553 } else if (!le_b) {
1554 err = -EINVAL;
1555 goto out;
1556 } else {
1557 le = le_b;
1558 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1559 &mi);
1560 if (!attr) {
1561 err = -EINVAL;
1562 goto out;
1563 }
1564 svcn = le64_to_cpu(attr->nres.svcn);
1565 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1566 }
1567
1568 err = attr_load_runs(attr, ni, run, NULL);
1569 if (err)
1570 goto out;
1571
1572 err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1573 if (err)
1574 goto out;
1575
1576 total_size -= (u64)clst_data << sbi->cluster_bits;
1577
1578 len = bytes_to_cluster(sbi, compr_size);
1579
1580 if (len == clst_data)
1581 goto out;
1582
1583 if (len < clst_data) {
1584 err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1585 NULL, true);
1586 if (err)
1587 goto out;
1588
1589 if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1590 false)) {
1591 err = -ENOMEM;
1592 goto out;
1593 }
1594 end = vcn + clst_data;
1595 /* Run contains updated range [vcn + len : end). */
1596 } else {
1597 CLST alen, hint = 0;
1598 /* Get the last LCN to allocate from. */
1599 if (vcn + clst_data &&
1600 !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1601 NULL)) {
1602 hint = -1;
1603 }
1604
1605 err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1606 hint + 1, len - clst_data, NULL, 0,
1607 &alen, 0, &lcn);
1608 if (err)
1609 goto out;
1610
1611 end = vcn + len;
1612 /* Run contains updated range [vcn + clst_data : end). */
1613 }
1614
1615 total_size += (u64)len << sbi->cluster_bits;
1616
1617 repack:
1618 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1619 if (err)
1620 goto out;
1621
1622 attr_b->nres.total_size = cpu_to_le64(total_size);
1623 inode_set_bytes(&ni->vfs_inode, total_size);
1624
1625 mi_b->dirty = true;
1626 mark_inode_dirty(&ni->vfs_inode);
1627
1628 /* Stored [vcn : next_svcn) from [vcn : end). */
1629 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1630
1631 if (end <= evcn1) {
1632 if (next_svcn == evcn1) {
1633 /* Normal way. Update attribute and exit. */
1634 goto ok;
1635 }
1636 /* Add new segment [next_svcn : evcn1). */
1637 if (!ni->attr_list.size) {
1638 err = ni_create_attr_list(ni);
1639 if (err)
1640 goto out;
1641 /* Layout of records is changed. */
1642 le_b = NULL;
1643 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1644 0, NULL, &mi_b);
1645 if (!attr_b) {
1646 err = -ENOENT;
1647 goto out;
1648 }
1649
1650 attr = attr_b;
1651 le = le_b;
1652 mi = mi_b;
1653 goto repack;
1654 }
1655 }
1656
1657 svcn = evcn1;
1658
1659 /* Estimate next attribute. */
1660 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1661
1662 if (attr) {
1663 CLST alloc = bytes_to_cluster(
1664 sbi, le64_to_cpu(attr_b->nres.alloc_size));
1665 CLST evcn = le64_to_cpu(attr->nres.evcn);
1666
1667 if (end < next_svcn)
1668 end = next_svcn;
1669 while (end > evcn) {
1670 /* Remove segment [svcn : evcn). */
1671 mi_remove_attr(NULL, mi, attr);
1672
1673 if (!al_remove_le(ni, le)) {
1674 err = -EINVAL;
1675 goto out;
1676 }
1677
1678 if (evcn + 1 >= alloc) {
1679 /* Last attribute segment. */
1680 evcn1 = evcn + 1;
1681 goto ins_ext;
1682 }
1683
1684 if (ni_load_mi(ni, le, &mi)) {
1685 attr = NULL;
1686 goto out;
1687 }
1688
1689 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1690 &le->id);
1691 if (!attr) {
1692 err = -EINVAL;
1693 goto out;
1694 }
1695 svcn = le64_to_cpu(attr->nres.svcn);
1696 evcn = le64_to_cpu(attr->nres.evcn);
1697 }
1698
1699 if (end < svcn)
1700 end = svcn;
1701
1702 err = attr_load_runs(attr, ni, run, &end);
1703 if (err)
1704 goto out;
1705
1706 evcn1 = evcn + 1;
1707 attr->nres.svcn = cpu_to_le64(next_svcn);
1708 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1709 if (err)
1710 goto out;
1711
1712 le->vcn = cpu_to_le64(next_svcn);
1713 ni->attr_list.dirty = true;
1714 mi->dirty = true;
1715
1716 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1717 }
1718 ins_ext:
1719 if (evcn1 > next_svcn) {
1720 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1721 next_svcn, evcn1 - next_svcn,
1722 attr_b->flags, &attr, &mi, NULL);
1723 if (err)
1724 goto out;
1725 }
1726 ok:
1727 run_truncate_around(run, vcn);
1728 out:
1729 if (new_valid > data_size)
1730 new_valid = data_size;
1731
1732 valid_size = le64_to_cpu(attr_b->nres.valid_size);
1733 if (new_valid != valid_size) {
1734 attr_b->nres.valid_size = cpu_to_le64(valid_size);
1735 mi_b->dirty = true;
1736 }
1737
1738 return err;
1739 }
1740
1741 /*
1742 * attr_collapse_range - Collapse range in file.
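 *
 * Removes [vbo, vbo + bytes) from the file by shifting the following data
 * down; only cluster (or compression frame) aligned ranges are accepted.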
1743 */
1744 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1745 {
1746 int err = 0;
1747 struct runs_tree *run = &ni->file.run;
1748 struct ntfs_sb_info *sbi = ni->mi.sbi;
1749 struct ATTRIB *attr = NULL, *attr_b;
1750 struct ATTR_LIST_ENTRY *le, *le_b;
1751 struct mft_inode *mi, *mi_b;
1752 CLST svcn, evcn1, len, dealloc, alen;
1753 CLST vcn, end;
1754 u64 valid_size, data_size, alloc_size, total_size;
1755 u32 mask;
1756 __le16 a_flags;
1757
1758 if (!bytes)
1759 return 0;
1760
1761 le_b = NULL;
1762 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1763 if (!attr_b)
1764 return -ENOENT;
1765
1766 if (!attr_b->non_res) {
1767 /* Attribute is resident. Nothing to do? */
1768 return 0;
1769 }
1770
1771 data_size = le64_to_cpu(attr_b->nres.data_size);
1772 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1773 a_flags = attr_b->flags;
1774
1775 if (is_attr_ext(attr_b)) {
1776 total_size = le64_to_cpu(attr_b->nres.total_size);
1777 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1778 } else {
1779 total_size = alloc_size;
1780 mask = sbi->cluster_mask;
1781 }
1782
1783 if ((vbo & mask) || (bytes & mask)) {
1784 /* Only cluster-aligned ranges can be collapsed. */
1785 return -EINVAL;
1786 }
1787
1788 if (vbo > data_size)
1789 return -EINVAL;
1790
1791 down_write(&ni->file.run_lock);
1792
1793 if (vbo + bytes >= data_size) {
1794 u64 new_valid = min(ni->i_valid, vbo);
1795
1796 /* Simple truncate file at 'vbo'. */
1797 truncate_setsize(&ni->vfs_inode, vbo);
1798 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1799 &new_valid, true, NULL);
1800
1801 if (!err && new_valid < ni->i_valid)
1802 ni->i_valid = new_valid;
1803
1804 goto out;
1805 }
1806
1807 /*
1808 * Enumerate all attribute segments and collapse.
1809 */
1810 alen = alloc_size >> sbi->cluster_bits;
1811 vcn = vbo >> sbi->cluster_bits;
1812 len = bytes >> sbi->cluster_bits;
1813 end = vcn + len;
1814 dealloc = 0;
1815
1816 svcn = le64_to_cpu(attr_b->nres.svcn);
1817 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1818
1819 if (svcn <= vcn && vcn < evcn1) {
1820 attr = attr_b;
1821 le = le_b;
1822 mi = mi_b;
1823 } else if (!le_b) {
1824 err = -EINVAL;
1825 goto out;
1826 } else {
1827 le = le_b;
1828 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1829 &mi);
1830 if (!attr) {
1831 err = -EINVAL;
1832 goto out;
1833 }
1834
1835 svcn = le64_to_cpu(attr->nres.svcn);
1836 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1837 }
1838
1839 for (;;) {
1840 if (svcn >= end) {
1841 /* Shift VCNs down by 'len'. */
1842 attr->nres.svcn = cpu_to_le64(svcn - len);
1843 attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1844 if (le) {
1845 le->vcn = attr->nres.svcn;
1846 ni->attr_list.dirty = true;
1847 }
1848 mi->dirty = true;
1849 } else if (svcn < vcn || end < evcn1) {
1850 CLST vcn1, eat, next_svcn;
1851
1852 /* Collapse a part of this attribute segment. */
1853 err = attr_load_runs(attr, ni, run, &svcn);
1854 if (err)
1855 goto out;
1856 vcn1 = max(vcn, svcn);
1857 eat = min(end, evcn1) - vcn1;
1858
1859 err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1860 true);
1861 if (err)
1862 goto out;
1863
1864 if (!run_collapse_range(run, vcn1, eat)) {
1865 err = -ENOMEM;
1866 goto out;
1867 }
1868
1869 if (svcn >= vcn) {
1870 /* Shift VCN */
1871 attr->nres.svcn = cpu_to_le64(vcn);
1872 if (le) {
1873 le->vcn = attr->nres.svcn;
1874 ni->attr_list.dirty = true;
1875 }
1876 }
1877
1878 err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1879 if (err)
1880 goto out;
1881
1882 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1883 if (next_svcn + eat < evcn1) {
1884 err = ni_insert_nonresident(
1885 ni, ATTR_DATA, NULL, 0, run, next_svcn,
1886 evcn1 - eat - next_svcn, a_flags, &attr,
1887 &mi, &le);
1888 if (err)
1889 goto out;
1890
1891 /* Layout of records may be changed. */
1892 attr_b = NULL;
1893 }
1894
1895 /* Free all allocated memory. */
1896 run_truncate(run, 0);
1897 } else {
1898 u16 le_sz;
1899 u16 roff = le16_to_cpu(attr->nres.run_off);
1900
1901 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1902 evcn1 - 1, svcn, Add2Ptr(attr, roff),
1903 le32_to_cpu(attr->size) - roff);
1904
1905 /* Delete this attribute segment. */
1906 mi_remove_attr(NULL, mi, attr);
1907 if (!le)
1908 break;
1909
1910 le_sz = le16_to_cpu(le->size);
1911 if (!al_remove_le(ni, le)) {
1912 err = -EINVAL;
1913 goto out;
1914 }
1915
1916 if (evcn1 >= alen)
1917 break;
1918
1919 if (!svcn) {
1920 /* Load next record that contains this attribute. */
1921 if (ni_load_mi(ni, le, &mi)) {
1922 err = -EINVAL;
1923 goto out;
1924 }
1925
1926 /* Look for required attribute. */
1927 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1928 0, &le->id);
1929 if (!attr) {
1930 err = -EINVAL;
1931 goto out;
1932 }
1933 goto next_attr;
1934 }
1935 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1936 }
1937
1938 if (evcn1 >= alen)
1939 break;
1940
1941 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1942 if (!attr) {
1943 err = -EINVAL;
1944 goto out;
1945 }
1946
1947 next_attr:
1948 svcn = le64_to_cpu(attr->nres.svcn);
1949 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1950 }
1951
1952 if (!attr_b) {
1953 le_b = NULL;
1954 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1955 &mi_b);
1956 if (!attr_b) {
1957 err = -ENOENT;
1958 goto out;
1959 }
1960 }
1961
1962 data_size -= bytes;
1963 valid_size = ni->i_valid;
1964 if (vbo + bytes <= valid_size)
1965 valid_size -= bytes;
1966 else if (vbo < valid_size)
1967 valid_size = vbo;
1968
1969 attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1970 attr_b->nres.data_size = cpu_to_le64(data_size);
1971 attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1972 total_size -= (u64)dealloc << sbi->cluster_bits;
1973 if (is_attr_ext(attr_b))
1974 attr_b->nres.total_size = cpu_to_le64(total_size);
1975 mi_b->dirty = true;
1976
1977 /* Update inode size. */
1978 ni->i_valid = valid_size;
1979 ni->vfs_inode.i_size = data_size;
1980 inode_set_bytes(&ni->vfs_inode, total_size);
1981 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1982 mark_inode_dirty(&ni->vfs_inode);
1983
1984 out:
1985 up_write(&ni->file.run_lock);
1986 if (err)
1987 _ntfs_bad_inode(&ni->vfs_inode);
1988
1989 return err;
1990 }
1991
1992 /*
1993 * attr_punch_hole
1994 *
1995 * Not for normal files.
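 *
 * If the range is not aligned to the compression frame, *frame_size is set
 * and E_NTFS_NOTALIGNED is returned so the caller can zero the unaligned
 * parts itself.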
1996 */
1997 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
1998 {
1999 int err = 0;
2000 struct runs_tree *run = &ni->file.run;
2001 struct ntfs_sb_info *sbi = ni->mi.sbi;
2002 struct ATTRIB *attr = NULL, *attr_b;
2003 struct ATTR_LIST_ENTRY *le, *le_b;
2004 struct mft_inode *mi, *mi_b;
2005 CLST svcn, evcn1, vcn, len, end, alen, dealloc, next_svcn;
2006 u64 total_size, alloc_size;
2007 u32 mask;
2008 __le16 a_flags;
2009
2010 if (!bytes)
2011 return 0;
2012
2013 le_b = NULL;
2014 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2015 if (!attr_b)
2016 return -ENOENT;
2017
2018 if (!attr_b->non_res) {
2019 u32 data_size = le32_to_cpu(attr_b->res.data_size);
2020 u32 from, to;
2021
2022 if (vbo > data_size)
2023 return 0;
2024
2025 from = vbo;
2026 to = min_t(u64, vbo + bytes, data_size);
2027 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2028 return 0;
2029 }
2030
2031 if (!is_attr_ext(attr_b))
2032 return -EOPNOTSUPP;
2033
2034 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2035 total_size = le64_to_cpu(attr_b->nres.total_size);
2036
2037 if (vbo >= alloc_size) {
2038 /* NOTE: It is allowed. */
2039 return 0;
2040 }
2041
2042 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2043
2044 bytes += vbo;
2045 if (bytes > alloc_size)
2046 bytes = alloc_size;
2047 bytes -= vbo;
2048
2049 if ((vbo & mask) || (bytes & mask)) {
2050 /* We have to zero a range(s). */
2051 if (frame_size == NULL) {
2052 /* Caller insists range is aligned. */
2053 return -EINVAL;
2054 }
2055 *frame_size = mask + 1;
2056 return E_NTFS_NOTALIGNED;
2057 }
2058
2059 down_write(&ni->file.run_lock);
2060 /*
2061 * Enumerate all attribute segments and punch hole where necessary.
2062 */
2063 alen = alloc_size >> sbi->cluster_bits;
2064 vcn = vbo >> sbi->cluster_bits;
2065 len = bytes >> sbi->cluster_bits;
2066 end = vcn + len;
2067 dealloc = 0;
2068
2069 svcn = le64_to_cpu(attr_b->nres.svcn);
2070 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2071 a_flags = attr_b->flags;
2072
2073 if (svcn <= vcn && vcn < evcn1) {
2074 attr = attr_b;
2075 le = le_b;
2076 mi = mi_b;
2077 } else if (!le_b) {
2078 err = -EINVAL;
2079 goto out;
2080 } else {
2081 le = le_b;
2082 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2083 &mi);
2084 if (!attr) {
2085 err = -EINVAL;
2086 goto out;
2087 }
2088
2089 svcn = le64_to_cpu(attr->nres.svcn);
2090 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2091 }
2092
2093 while (svcn < end) {
2094 CLST vcn1, zero, dealloc2;
2095
2096 err = attr_load_runs(attr, ni, run, &svcn);
2097 if (err)
2098 goto out;
2099 vcn1 = max(vcn, svcn);
2100 zero = min(end, evcn1) - vcn1;
2101
2102 dealloc2 = dealloc;
2103 err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
2104 if (err)
2105 goto out;
2106
2107 if (dealloc2 == dealloc) {
2108 /* Looks like the required range is already sparse. */
2109 } else {
2110 if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
2111 false)) {
2112 err = -ENOMEM;
2113 goto out;
2114 }
2115
2116 err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2117 if (err)
2118 goto out;
2119 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2120 if (next_svcn < evcn1) {
2121 err = ni_insert_nonresident(ni, ATTR_DATA, NULL,
2122 0, run, next_svcn,
2123 evcn1 - next_svcn,
2124 a_flags, &attr, &mi,
2125 &le);
2126 if (err)
2127 goto out;
2128 /* Layout of records may be changed. */
2129 attr_b = NULL;
2130 }
2131 }
2132 /* Free all allocated memory. */
2133 run_truncate(run, 0);
2134
2135 if (evcn1 >= alen)
2136 break;
2137
2138 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2139 if (!attr) {
2140 err = -EINVAL;
2141 goto out;
2142 }
2143
2144 svcn = le64_to_cpu(attr->nres.svcn);
2145 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2146 }
2147
2148 total_size -= (u64)dealloc << sbi->cluster_bits;
2149 if (!attr_b) {
2150 attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2151 &mi_b);
2152 if (!attr_b) {
2153 err = -EINVAL;
2154 goto out;
2155 }
2156 }
2157 attr_b->nres.total_size = cpu_to_le64(total_size);
2158 mi_b->dirty = true;
2159
2160 /* Update inode size. */
2161 inode_set_bytes(&ni->vfs_inode, total_size);
2162 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2163 mark_inode_dirty(&ni->vfs_inode);
2164
2165 out:
2166 up_write(&ni->file.run_lock);
2167 if (err)
2168 _ntfs_bad_inode(&ni->vfs_inode);
2169
2170 return err;
2171 }
2172
2173 /*
2174 * attr_insert_range - Insert range (hole) in file.
2175 * Not for normal files.
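 *
 * Shifts the data at @vbo up by @bytes and leaves a sparse hole in its
 * place; @vbo and @bytes must be aligned to the compression frame (or
 * cluster) size.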
2176 */
2177 int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2178 {
2179 int err = 0;
2180 struct runs_tree *run = &ni->file.run;
2181 struct ntfs_sb_info *sbi = ni->mi.sbi;
2182 struct ATTRIB *attr = NULL, *attr_b;
2183 struct ATTR_LIST_ENTRY *le, *le_b;
2184 struct mft_inode *mi, *mi_b;
2185 CLST vcn, svcn, evcn1, len, next_svcn;
2186 u64 data_size, alloc_size;
2187 u32 mask;
2188 __le16 a_flags;
2189
2190 if (!bytes)
2191 return 0;
2192
2193 le_b = NULL;
2194 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2195 if (!attr_b)
2196 return -ENOENT;
2197
2198 if (!is_attr_ext(attr_b)) {
2199 /* It was checked above. See fallocate. */
2200 return -EOPNOTSUPP;
2201 }
2202
2203 if (!attr_b->non_res) {
2204 data_size = le32_to_cpu(attr_b->res.data_size);
2205 alloc_size = data_size;
2206 mask = sbi->cluster_mask; /* cluster_size - 1 */
2207 } else {
2208 data_size = le64_to_cpu(attr_b->nres.data_size);
2209 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2210 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2211 }
2212
2213 if (vbo > data_size) {
2214 /* Inserting a range beyond the end of the file is not allowed. */
2215 return -EINVAL;
2216 }
2217
2218 if ((vbo & mask) || (bytes & mask)) {
2219 /* Only frame-aligned ranges can be inserted. */
2220 return -EINVAL;
2221 }
2222
2223 /*
2224 * valid_size <= data_size <= alloc_size
2225 * Check alloc_size for maximum possible.
2226 */
2227 if (bytes > sbi->maxbytes_sparse - alloc_size)
2228 return -EFBIG;
2229
2230 vcn = vbo >> sbi->cluster_bits;
2231 len = bytes >> sbi->cluster_bits;
2232
2233 down_write(&ni->file.run_lock);
2234
2235 if (!attr_b->non_res) {
2236 err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2237 data_size + bytes, NULL, false, &attr);
2238 if (err)
2239 goto out;
2240 if (!attr->non_res) {
2241 /* Still resident. */
2242 char *data = Add2Ptr(attr, attr->res.data_off);
2243
2244 memmove(data + bytes, data, bytes);
2245 memset(data, 0, bytes);
2246 err = 0;
2247 goto out;
2248 }
2249 /* Resident file becomes nonresident. */
2250 le_b = NULL;
2251 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2252 &mi_b);
2253 if (!attr_b) {
2254 err = -ENOENT;
2255 goto out;
2256 }
2257 if (!attr_b->non_res) {
2258 err = -EINVAL;
2259 goto out;
2260 }
2261 data_size = le64_to_cpu(attr_b->nres.data_size);
2262 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2263 }
2264
2265 /*
2266 * Enumerate all attribute segments and shift start vcn.
2267 */
2268 a_flags = attr_b->flags;
2269 svcn = le64_to_cpu(attr_b->nres.svcn);
2270 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2271
2272 if (svcn <= vcn && vcn < evcn1) {
2273 attr = attr_b;
2274 le = le_b;
2275 mi = mi_b;
2276 } else if (!le_b) {
2277 err = -EINVAL;
2278 goto out;
2279 } else {
2280 le = le_b;
2281 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2282 &mi);
2283 if (!attr) {
2284 err = -EINVAL;
2285 goto out;
2286 }
2287
2288 svcn = le64_to_cpu(attr->nres.svcn);
2289 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2290 }
2291
2292 run_truncate(run, 0); /* clear cached values. */
2293 err = attr_load_runs(attr, ni, run, NULL);
2294 if (err)
2295 goto out;
2296
2297 if (!run_insert_range(run, vcn, len)) {
2298 err = -ENOMEM;
2299 goto out;
2300 }
2301
2302 /* Try to pack in current record as much as possible. */
2303 err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2304 if (err)
2305 goto out;
2306
2307 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2308 run_truncate_head(run, next_svcn);
2309
2310 while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2311 attr->type == ATTR_DATA && !attr->name_len) {
2312 le64_add_cpu(&attr->nres.svcn, len);
2313 le64_add_cpu(&attr->nres.evcn, len);
2314 if (le) {
2315 le->vcn = attr->nres.svcn;
2316 ni->attr_list.dirty = true;
2317 }
2318 mi->dirty = true;
2319 }
2320
2321 /*
2322 * Update primary attribute segment in advance.
2323 * The pointer attr_b may become invalid (the layout of the MFT may change).
2324 */
2325 if (vbo <= ni->i_valid)
2326 ni->i_valid += bytes;
2327
2328 attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2329 attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2330
2331 /* ni->i_valid may not be equal to valid_size (temporarily). */
2332 if (ni->i_valid > data_size + bytes)
2333 attr_b->nres.valid_size = attr_b->nres.data_size;
2334 else
2335 attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2336 mi_b->dirty = true;
2337
2338 if (next_svcn < evcn1 + len) {
2339 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2340 next_svcn, evcn1 + len - next_svcn,
2341 a_flags, NULL, NULL, NULL);
2342 if (err)
2343 goto out;
2344 }
2345
2346 ni->vfs_inode.i_size += bytes;
2347 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2348 mark_inode_dirty(&ni->vfs_inode);
2349
2350 out:
2351 run_truncate(run, 0); /* clear cached values. */
2352
2353 up_write(&ni->file.run_lock);
2354 if (err)
2355 _ntfs_bad_inode(&ni->vfs_inode);
2356
2357 return err;
2358 }