]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blame - fs/ntfs3/file.c
fs/ntfs3: Restyle comments to better align with kernel-doc
[mirror_ubuntu-kernels.git] / fs / ntfs3 / file.c
CommitLineData
4342306f
KK
1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
e8b8e97f
KA
6 * Regular file handling primitives for NTFS-based filesystems.
7 *
4342306f 8 */
e8b8e97f 9
4342306f
KK
10#include <linux/backing-dev.h>
11#include <linux/buffer_head.h>
12#include <linux/compat.h>
13#include <linux/falloc.h>
14#include <linux/fiemap.h>
15#include <linux/msdos_fs.h> /* FAT_IOCTL_XXX */
16#include <linux/nls.h>
17
18#include "debug.h"
19#include "ntfs.h"
20#include "ntfs_fs.h"
21
/*
 * ntfs_ioctl_fitrim - Handle the FITRIM ioctl: discard unused clusters.
 *
 * Copies the caller's fstrim_range from user space, clamps the minimum
 * extent length to the device's discard granularity, runs the trim, and
 * copies the (possibly updated) range back to user space.
 *
 * Return: 0 on success; -EPERM, -EOPNOTSUPP, -EFAULT or an
 * ntfs_trim_fs() error otherwise.
 */
static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The underlying device must support discard/TRIM. */
	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	/* Never trim extents smaller than the device can discard. */
	range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	/* Return the range (updated by ntfs_trim_fs) to user space. */
	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}
50
/*
 * ntfs_ioctl - file_operations::unlocked_ioctl
 *
 * Supports the FAT-compatible attribute/volume-id queries and FITRIM.
 */
static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u32 __user *user_attr = (u32 __user *)arg;

	switch (cmd) {
	case FAT_IOCTL_GET_ATTRIBUTES:
		/* Report the NTFS standard attributes (FAT-style query). */
		return put_user(le32_to_cpu(ntfs_i(inode)->std_fa), user_attr);

	case FAT_IOCTL_GET_VOLUME_ID:
		return put_user(sbi->volume.ser_num, user_attr);

	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}
69
#ifdef CONFIG_COMPAT
/* 32-bit ioctl entry point: widen the pointer argument and forward. */
static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)

{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif
77
/*
 * ntfs_getattr - inode_operations::getattr
 *
 * Fills @stat via generic_fillattr() and additionally reports the NTFS
 * compressed/encrypted attribute bits, the creation time (btime) and the
 * volume cluster size as the preferred I/O block size.
 */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;

	generic_fillattr(mnt_userns, inode, stat);

	/* NTFS records a creation time; expose it through statx. */
	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	return 0;
}
103
/*
 * ntfs_extend_initialized_size - Zero the range [valid, new_valid) on disk.
 *
 * Advances ni->i_valid towards @new_valid, zeroing the not-yet-initialized
 * bytes through the page cache so stale on-disk data is never exposed.
 * Resident files need no zeroing (their data lives in the MFT record), so
 * only i_valid is updated.  For sparse files, holes inside the range are
 * skipped by advancing i_valid across them instead of writing zeroes.
 *
 * On failure ni->i_valid is restored to @valid.
 * Return: 0 on success, negative errno otherwise.
 */
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		/* No clusters to zero - resident data is inside the record. */
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct page *page;
		void *fsdata;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
						  NULL);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				/* A hole: reads already return zeroes. */
				loff_t vbo = (loff_t)vcn << bits;
				loff_t to = vbo + ((loff_t)clen << bits);

				if (to <= new_valid) {
					/* Hole ends inside range: skip it. */
					ni->i_valid = to;
					pos = to;
					goto next;
				}

				if (vbo < pos) {
					pos = vbo;
				} else {
					/* Hole covers the tail of range. */
					to = (new_valid >> bits) << bits;
					if (pos < to) {
						ni->i_valid = to;
						pos = to;
						goto next;
					}
				}
			}
		}

		/* Zero the rest of the page containing 'pos'. */
		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
					    &fsdata);
		if (err)
			goto out;

		zero_user_segment(page, zerofrom, PAGE_SIZE);

		/* This function in any case puts page. */
		err = pagecache_write_end(file, mapping, pos, len, len, page,
					  fsdata);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		/* Throttle dirtying and give the scheduler a chance. */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}
198
/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * Zeroes the byte range [vbo, vbo_to) through the page cache and marks
 * the covered buffers dirty so the zeroes reach disk.  Buffers that map
 * to holes are skipped - they read back as zero anyway.
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 z_start = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, z_end;
	sector_t iblock;
	struct page *page;

	for (; idx < idx_end; idx += 1, z_start = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		/* Clamp the zeroed span to the end of the range. */
		z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
							: PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		page = find_or_create_page(mapping, idx,
					   mapping_gfp_constraint(mapping,
								  ~__GFP_FS));
		if (!page)
			return -ENOMEM;

		if (!page_has_buffers(page))
			create_empty_buffers(page, blocksize, 0);

		bh = head = page_buffers(page);
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			/* Skip buffers entirely outside [z_start, z_end). */
			if (bh_next <= z_start || bh_off >= z_end)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (PageUptodate(page))
				set_buffer_uptodate(bh);

			if (!buffer_uptodate(bh)) {
				/* Read the block in before partial zeroing. */
				lock_buffer(bh);
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(REQ_OP_READ, 0, bh);

				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					unlock_page(page);
					put_page(page);
					err = -EIO;
					goto out;
				}
			}

			mark_buffer_dirty(bh);

		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		zero_user_segment(page, z_start, z_end);

		unlock_page(page);
		put_page(page);
		cond_resched();
	}
out:
	mark_inode_dirty(inode);
	return err;
}
281
/*
 * ntfs_sparse_cluster
 *
 * Helper function to zero newly allocated clusters in the page cache.
 * @page0, if non-NULL, is a page already held by the caller covering part
 * of the range; it is used in place and deliberately not unlocked/put here.
 * NOTE: 512 <= cluster size <= 2M
 */
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
			 CLST len)
{
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u64 vbo = (u64)vcn << sbi->cluster_bits;
	u64 bytes = (u64)len << sbi->cluster_bits;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx0 = page0 ? page0->index : -1;
	loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
	loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
	pgoff_t idx = vbo_clst >> PAGE_SHIFT;
	u32 from = vbo_clst & (PAGE_SIZE - 1);
	pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	u32 to;
	bool partial;
	struct page *page;

	for (; idx < idx_end; idx += 1, from = 0) {
		page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);

		if (!page)
			continue;

		page_off = (loff_t)idx << PAGE_SHIFT;
		/* Clamp the zeroed span to the cluster-aligned end. */
		to = (page_off + PAGE_SIZE) > end ? (end - page_off)
						  : PAGE_SIZE;
		partial = false;

		if ((from || PAGE_SIZE != to) &&
		    likely(!page_has_buffers(page))) {
			create_empty_buffers(page, blocksize, 0);
		}

		if (page_has_buffers(page)) {
			struct buffer_head *head, *bh;
			u32 bh_off = 0;

			bh = head = page_buffers(page);
			do {
				u32 bh_next = bh_off + blocksize;

				if (from <= bh_off && bh_next <= to) {
					/* Fully inside the zeroed span. */
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
				} else if (!buffer_uptodate(bh)) {
					partial = true;
				}
				bh_off = bh_next;
			} while (head != (bh = bh->b_this_page));
		}

		zero_user_segment(page, from, to);

		if (!partial) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
			set_page_dirty(page);
		}

		/* Never release the caller-owned page0. */
		if (idx != idx0) {
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
	mark_inode_dirty(inode);
}
357
/*
 * ntfs_file_mmap - file_operations::mmap
 *
 * Rejects encrypted/deduplicated files and writable mappings of
 * compressed files.  For writable mappings it preallocates clusters for
 * sparse files and extends the initialized size so page faults never
 * expose uninitialized data, then defers to generic_file_mmap().
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		/* The mapped span, clamped to the current file size. */
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new);
				if (err)
					goto out;

				if (!new)
					continue;
				/* Freshly allocated cluster: zero it. */
				ntfs_sparse_cluster(inode, NULL, vcn, 1);
			}
		}

		if (ni->i_valid < to) {
			/* Avoid blocking in mmap path; bail with -EAGAIN. */
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}
426
/*
 * ntfs_extend - Prepare the inode for a write of @count bytes at @pos.
 *
 * Grows i_size when the write reaches past EOF, and - when @file is
 * given and @pos lies beyond the initialized size - zeroes the gap up
 * to @pos (except for compressed files).  For O_SYNC inodes the dirty
 * data and the inode itself are written out before returning.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	/* Nothing to do if the write stays inside initialized data. */
	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
		inode->i_size = end;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		/* Flush data, metadata buffers and the inode; keep 1st err. */
		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}
477
/*
 * ntfs_truncate - Shrink a regular file to @new_size.
 *
 * Zeroes the tail of the last remaining block (non-compressed files),
 * truncates the page cache and the on-disk data attribute, and clamps
 * the initialized size.  Directories and other non-regular inodes are
 * a no-op.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		/* No tail zeroing needed; just clamp the valid size. */
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	ni_lock(ni);

	truncate_setsize(inode, new_size);

	/* Resize the on-disk data attribute under the run lock. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, true, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode->i_ctime = inode->i_mtime = current_time(inode);
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		/* Synchronous inode: write it out right away. */
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}
531
/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 * Also supports FALLOC_FL_PUNCH_HOLE and FALLOC_FL_COLLAPSE_RANGE.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, PAGE_SIZE);
	loff_t i_size;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		/* punch_hole requires KEEP_SIZE (VFS contract). */
		if (!(mode & FALLOC_FL_KEEP_SIZE)) {
			err = -EINVAL;
			goto out;
		}

		/* Flush dirty pages that overlap or follow the hole. */
		err = filemap_write_and_wait_range(inode->i_mapping, vbo,
						   end - 1);
		if (err)
			goto out;

		err = filemap_write_and_wait_range(inode->i_mapping, end,
						   LLONG_MAX);
		if (err)
			goto out;

		inode_dio_wait(inode);

		truncate_pagecache(inode, vbo_down);

		if (!is_sparsed(ni) && !is_compressed(ni)) {
			/* Normal file: cannot deallocate, just zero. */
			err = ntfs_zero_range(inode, vbo, end);
			goto out;
		}

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/*
		 * Process a punch that is not frame-aligned: zero the
		 * unaligned head and tail, punch the aligned middle.
		 */
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/* COLLAPSE_RANGE must not be combined with other flags. */
		if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
			err = -EINVAL;
			goto out;
		}

		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
						   vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve them
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(inode->i_mapping, end,
						   LLONG_MAX);
		if (err)
			goto out;

		/* Wait for existing dio to complete. */
		inode_dio_wait(inode);

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
	} else {
		/*
		 * Normal file: Allocate clusters, do not change 'valid' size.
		 */
		err = ntfs_set_size(inode, max(end, i_size));
		if (err)
			goto out;

		if (is_sparsed(ni) || is_compressed(ni)) {
			CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
			CLST vcn = vbo >> sbi->cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST lcn, clen;
			bool new;

			/*
			 * Allocate but do not zero new clusters. (see below comments)
			 * This breaks security: One can read unused on-disk areas.
			 * Zeroing these clusters may be too long.
			 * Maybe we should check here for root rights?
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new);
				if (err)
					goto out;
				if (!new || vcn >= vcn_v)
					continue;

				/*
				 * Unwritten area.
				 * NTFS is not able to store several unwritten areas.
				 * Activate 'ntfs_sparse_cluster' to zero new allocated clusters.
				 *
				 * Dangerous in case:
				 * 1G of sparsed clusters + 1 cluster of data =>
				 * valid_size == 1G + 1 cluster
				 * fallocate(1G) will zero 1G and this can be very long
				 * xfstest 016/086 will fail without 'ntfs_sparse_cluster'.
				 */
				ntfs_sparse_cluster(inode, NULL, vcn,
						    min(vcn_v - vcn, clen));
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
		}
	}

out:
	/* Report "no space" instead of "file too big" to the caller. */
	if (err == -EFBIG)
		err = -ENOSPC;

	if (!err) {
		inode->i_ctime = inode->i_mtime = current_time(inode);
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
733
/*
 * ntfs3_setattr - inode_operations::setattr
 *
 * Applies attribute changes: size changes go through ntfs_truncate()/
 * ntfs_extend(), mode changes update the ACL and the NTFS read-only
 * attribute, and uid/gid/mode changes are mirrored into WSL metadata.
 */
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	struct super_block *sb = dentry->d_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (sbi->options.no_acs_rules) {
		/* "No access rules" - Force any changes of time etc. */
		attr->ia_valid |= ATTR_FORCE;
		/* and disable for editing some attributes. */
		attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
		ia_valid = attr->ia_valid;
	}

	err = setattr_prepare(mnt_userns, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);

		if (attr->ia_size < oldsize)
			err = ntfs_truncate(inode, attr->ia_size);
		else if (attr->ia_size > oldsize)
			err = ntfs_extend(inode, attr->ia_size, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	}

	setattr_copy(mnt_userns, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(mnt_userns, inode);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode);
	mark_inode_dirty(inode);
out:
	return err;
}
801
802static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
803{
804 ssize_t err;
805 size_t count = iov_iter_count(iter);
806 struct file *file = iocb->ki_filp;
807 struct inode *inode = file->f_mapping->host;
808 struct ntfs_inode *ni = ntfs_i(inode);
809
810 if (is_encrypted(ni)) {
811 ntfs_inode_warn(inode, "encrypted i/o not supported");
812 return -EOPNOTSUPP;
813 }
814
815 if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
816 ntfs_inode_warn(inode, "direct i/o + compressed not supported");
817 return -EOPNOTSUPP;
818 }
819
820#ifndef CONFIG_NTFS3_LZX_XPRESS
821 if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
822 ntfs_inode_warn(
823 inode,
824 "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
825 return -EOPNOTSUPP;
826 }
827#endif
828
829 if (is_dedup(ni)) {
830 ntfs_inode_warn(inode, "read deduplicated not supported");
831 return -EOPNOTSUPP;
832 }
833
834 err = count ? generic_file_read_iter(iocb, iter) : 0;
835
836 return err;
837}
838
e8b8e97f
KA
839/*
840 * ntfs_get_frame_pages
841 *
842 * Return: Array of locked pages.
843 */
4342306f
KK
844static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
845 struct page **pages, u32 pages_per_frame,
846 bool *frame_uptodate)
847{
848 gfp_t gfp_mask = mapping_gfp_mask(mapping);
849 u32 npages;
850
851 *frame_uptodate = true;
852
853 for (npages = 0; npages < pages_per_frame; npages++, index++) {
854 struct page *page;
855
856 page = find_or_create_page(mapping, index, gfp_mask);
857 if (!page) {
858 while (npages--) {
859 page = pages[npages];
860 unlock_page(page);
861 put_page(page);
862 }
863
864 return -ENOMEM;
865 }
866
867 if (!PageUptodate(page))
868 *frame_uptodate = false;
869
870 pages[npages] = page;
871 }
872
873 return 0;
874}
875
/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 *
 * Writes are performed one compression frame at a time: the full frame's
 * pages are gathered (and read in when partially overwritten), user data
 * is copied into them, and the frame is recompressed via ni_write_frame().
 * Before the copy loop, the gap [i_valid, pos) is zeroed frame by frame so
 * no stale data becomes visible.
 *
 * Return: number of bytes written, or negative errno.
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
					  &clen, NULL);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			/* A hole reads as zero; just advance the valid size. */
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		/* Zero from the valid offset to the end of the frame. */
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame = pos >> frame_bits;
		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		/* Fault user pages in before taking page locks. */
		if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			/* Partial frame overwrite: read existing data first. */
			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			/* Stop at end of data or on a short (faulted) copy. */
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	current->backing_dev_info = NULL;

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;

	return written;
}
1100
/*
 * ntfs_file_write_iter - file_operations::write_iter
 *
 * Rejects unsupported i/o modes, extends the file/initialized size via
 * ntfs_extend(), then dispatches to the compressed write helper or the
 * generic write path.
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	/* Honour IOCB_NOWAIT: do not sleep on the inode lock. */
	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Grow i_size / initialized size if this write requires it. */
	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
				: __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
1158
/*
 * ntfs_file_open - file_operations::open
 *
 * Refuses O_DIRECT on compressed/encrypted files.  Files compressed with
 * the external LZX/Xpress formats are decompressed on first open for
 * writing (or rejected when CONFIG_NTFS3_LZX_XPRESS is not enabled).
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}
1189
/*
 * ntfs_file_release - file_operations::release
 *
 * When preallocation is enabled and this is the last writer, trim the
 * data attribute back to i_size, dropping any preallocated tail.
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are last writer on the inode, drop the block reservation. */
	if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
				      atomic_read(&inode->i_writecount) == 1)) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		/* false: do not keep the preallocated clusters. */
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    inode->i_size, &ni->i_valid, false, NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}
1213
e8b8e97f
KA
1214/*
1215 * ntfs_fiemap - file_operations::fiemap
1216 */
4342306f
KK
1217int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1218 __u64 start, __u64 len)
1219{
1220 int err;
1221 struct ntfs_inode *ni = ntfs_i(inode);
1222
1223 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
1224 return -EOPNOTSUPP;
1225
1226 ni_lock(ni);
1227
1228 err = ni_fiemap(ni, fieinfo, start, len);
1229
1230 ni_unlock(ni);
1231
1232 return err;
1233}
1234
// clang-format off
/* Inode operations for regular NTFS files. */
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.permission	= ntfs_permission,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

/* File operations for regular NTFS files. */
const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};
// clang-format on
1262// clang-format on