// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/capability.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "extent_map.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}
static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
	ocfs2_lock_res_free(&fp->fp_flock);

	file->private_data = NULL;
}
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE) {
		status = dquot_initialize(inode);
	}

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);
	}

	oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		spin_unlock(&oi->ip_lock);
	}

	file->f_mode |= FMODE_NOWAIT;

	return status;
}
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name);

	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}
static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}
static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int err = 0, ret;
	tid_t commit_tid;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	err = file_write_and_wait_range(file, start, end);

	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier)
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);

	return (err < 0) ? -EIO : 0;
}
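
/*
 * Note on ocfs2_sync_file() above: the data is flushed with
 * file_write_and_wait_range(), then the transaction recorded in
 * i_datasync_tid (for fdatasync) or i_sync_tid is forced to commit with
 * jbd2_complete_transaction().  A separate blkdev_issue_flush() is only
 * needed when jbd2 will not itself send a barrier for that commit.
 */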
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec64 now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;
	}

	now = current_time(inode);
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}
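
/*
 * In short: atime updates are suppressed on read-only mounts, for
 * S_NOATIME/MNT_NOATIME cases, and while the existing atime is still
 * within osb->s_atime_quantum seconds; relatime mounts only update when
 * atime is not newer than mtime/ctime.
 */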
int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = current_time(inode);
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_journal_dirty(handle, bh);

	ocfs2_commit_trans(osb, handle);

	return ret;
}
int ocfs2_set_inode_size(handle_t *handle,
			 struct inode *inode,
			 struct buffer_head *fe_bh,
			 u64 new_i_size)
{
	int status;

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);

	return status;
}
int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_commit_trans(osb, handle);

	return ret;
}
static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is aligned to the range of the cluster, there is
	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
	 * CoW either.
	 */
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		return 0;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		return 0;

	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos + 1);
}
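
/*
 * ocfs2_cow_file_pos() exists because a truncate that lands in the middle
 * of a refcounted (shared) cluster will later zero the tail of that
 * cluster in place; the cluster has to be un-shared (CoWed) first so the
 * zeroing cannot leak into the other inode sharing it.
 */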
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster contains the offset if it is reflinked
	 * since we will call ocfs2_zero_range_for_truncate later which will
	 * write "0" from offset to the end of the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, fe_bh);

	ocfs2_commit_trans(osb, handle);

	return status;
}
int ocfs2_truncate_file(struct inode *inode,
			struct buffer_head *di_bh,
			u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)le64_to_cpu(fe->i_size),
				  (unsigned long long)new_i_size);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		trace_ocfs2_truncate_file_error(
			(unsigned long long)le64_to_cpu(fe->i_size),
			(unsigned long long)new_i_size);
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * explicitly.
	 */
	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0)
		goto bail_unlock_sem;

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0)
		goto bail_unlock_sem;

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	return status;
}
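
/*
 * Truncate order above: discard the local-alloc reservation, drop page
 * cache past the new size, then either shrink inline data directly or
 * orphan the inode and run ocfs2_commit_truncate() so a crash mid-way
 * can be completed by recovery.
 */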
/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				   u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	int did_quota = 0;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why = RESTART_NONE;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;

	/*
	 * Unwritten extent only exists for file systems which
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);

	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
	}

restarted_transaction:
	trace_ocfs2_extend_allocation(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)i_size_read(inode),
		le32_to_cpu(fe->i_clusters), clusters_to_add,
		why, restart_func);

	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));

	/* reserve a write to the file entry early on - that we if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb, inode, &logical_start,
				      clusters_to_add, mark_unwritten,
				      bh, handle, data_ac, meta_ac, &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
	}

	ocfs2_update_inode_fsync_trans(handle, inode, 1);
	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			restart_func = 1;
		} else {
			BUG_ON(why != RESTART_TRANS);

			status = ocfs2_allocate_extend_trans(handle, 1);

			/* handle still has to be committed at
			 * this point. */
			goto restarted_transaction;
		}
	}

	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
					  le32_to_cpu(fe->i_clusters),
					  (unsigned long long)le64_to_cpu(fe->i_size),
					  OCFS2_I(inode)->ip_clusters,
					  (unsigned long long)i_size_read(inode));

	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));

	ocfs2_commit_trans(osb, handle);

	ocfs2_free_alloc_context(data_ac);

	ocfs2_free_alloc_context(meta_ac);

	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}

	return status;
}
/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
						      struct buffer_head *di_bh,
						      loff_t start_byte,
						      loff_t length)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);

	ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
	if (ret) {
		ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}
/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to, struct buffer_head *di_bh)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_SHIFT;
	handle_t *handle;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
						      abs_from,
						      abs_to - abs_from);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	}

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		goto out_commit_trans;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_SIZE - 1);
	zero_to = abs_to & (PAGE_SIZE - 1);

	trace_ocfs2_write_zero_page(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)abs_from,
			(unsigned long long)abs_to,
			index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + i_blocksize(inode);

		/*
		 * block_start is block-aligned.  Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);

		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
	}

	/*
	 * fs-writeback will release the dirty pages without page lock
	 * whose offset are over inode size, the release happens at
	 * block_write_full_page().
	 */
	i_size_write(inode, abs_to);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = current_time(inode);
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	di->i_mtime_nsec = di->i_ctime_nsec;

	ocfs2_journal_dirty(handle, di_bh);
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

out_commit_trans:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);

	return ret;
}
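
/*
 * The "block_start + 1" trick above deliberately passes an offset that
 * is not block aligned so that __block_write_begin()/block_commit_write()
 * zero the whole block rather than assuming it is already up to date,
 * while i_size is only bumped afterwards under the journal handle.
 */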
/*
 * Find the next range to zero.  We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache.  We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()s current idea of what
 * needs to be zeroed.  range_start and range_end return the next zeroing
 * range.  A subsequent call should pass the previous range_end as its
 * zero_start.  If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over.  Refcounted extents are CoWd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}
/*
 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end, struct buffer_head *di_bh)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	trace_ocfs2_zero_extend_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)range_start,
			(unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);

		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
				(unsigned long long)zero_start,
				(unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start, &range_end);

		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end, di_bh);

		zero_start = range_end;
	}

	return ret;
}
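
/*
 * ocfs2_zero_extend() walks from the current i_size up to zero_to_size,
 * asking ocfs2_zero_extend_get_range() for the next written, non-hole
 * range and zeroing it page by page; unwritten extents are skipped and
 * refcounted ones are CoWed by the range lookup.
 */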
int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

	if (clusters_to_add) {
		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
					      clusters_to_add, 0);
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);

	return ret;
}
static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here.  We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inodes
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);

out:
	return ret;
}
int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	int status = 0, size_change;
	int inode_locked = 0;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;
	int had_lock;
	struct ocfs2_lock_holder oh;

	trace_ocfs2_setattr(inode, dentry,
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    dentry->d_name.len, dentry->d_name.name,
			    attr->ia_valid, attr->ia_mode,
			    from_kuid(&init_user_ns, attr->ia_uid),
			    from_kgid(&init_user_ns, attr->ia_gid));

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
		return 0;

	status = setattr_prepare(&init_user_ns, dentry, attr);

	if (is_quota_modification(inode, attr)) {
		status = dquot_initialize(inode);
	}
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		/*
		 * Here we should wait dio to finish before inode lock
		 * to avoid a deadlock between ocfs2_setattr() and
		 * ocfs2_dio_end_io_write()
		 */
		inode_dio_wait(inode);

		status = ocfs2_rw_lock(inode, 1);
	}

	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
	if (had_lock < 0) {
		goto bail_unlock_rw;
	} else if (had_lock) {
		/*
		 * As far as we know, ocfs2_setattr() could only be the first
		 * VFS entry point in the call chain of recursive cluster
		 * locking issue.
		 *
		 *     ocfs2_iop_get_acl()
		 *
		 * But, we're not 100% sure if it's always true, because the
		 * ordering of the VFS entry points in the call chain is out
		 * of our control. So, we'd better dump the stack here to
		 * catch the other cases of recursive locking.
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
	}
	inode_locked = 1;

	if (size_change) {
		status = inode_newsize_ok(inode, attr->ia_size);

		if (i_size_read(inode) >= attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								      attr->ia_size);
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
		}
	}

	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
			OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
			if (IS_ERR(transfer_to[USRQUOTA])) {
				status = PTR_ERR(transfer_to[USRQUOTA]);
				transfer_to[USRQUOTA] = NULL;
			}
		}
		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
			OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
			if (IS_ERR(transfer_to[GRPQUOTA])) {
				status = PTR_ERR(transfer_to[GRPQUOTA]);
				transfer_to[GRPQUOTA] = NULL;
			}
		}
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			goto bail_unlock_alloc;
		}
		status = __dquot_transfer(inode, transfer_to);
	} else {
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			goto bail_unlock_alloc;
		}
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);

	ocfs2_commit_trans(osb, handle);
bail_unlock_alloc:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	if (status && inode_locked) {
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
		inode_locked = 0;
	}
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode, bh);
	}
	if (inode_locked)
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);

	return status;
}
int ocfs2_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct super_block *sb = path->dentry->d_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	err = ocfs2_inode_revalidate(path->dentry);

	generic_fillattr(&init_user_ns, inode, stat);
	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		stat->blocks += (stat->size + 511)>>9;

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

	return err;
}
int ocfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     int mask)
{
	int ret, had_lock;
	struct ocfs2_lock_holder oh;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
	if (had_lock < 0) {
		ret = had_lock;
		goto out;
	} else if (had_lock) {
		/* See comments in ocfs2_setattr() for details.
		 * The call chain of this case could be:
		 *   inode_permission()
		 *    ocfs2_permission()
		 *     ocfs2_iop_get_acl()
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
	}

	ret = generic_permission(&init_user_ns, inode, mask);

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
	return ret;
}
static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	trace_ocfs2_write_remove_suid(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, bh);

	ocfs2_commit_trans(osb, handle);

	return ret;
}
static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);

	ret = __ocfs2_write_remove_suid(inode, bh);

	brelse(bh);
	return ret;
}
/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes.  Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

out:
	brelse(di_bh);
	return ret;
}
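
/*
 * RESVSP preallocation: the loop above walks the byte range cluster by
 * cluster, skips regions that already have an allocation, and extends
 * the file with mark_unwritten == 1 so the new extents read back as
 * zeroes until they are actually written.
 */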
/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}
/*
 * zero out partial blocks of one cluster.
 *
 * start: file offset where zero starts, will be made upper block aligned.
 * len: it will be trimmed to the end of current cluster if "start + len"
 *      is bigger than it.
 */
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
					 u64 start, u64 len)
{
	int ret;
	u64 start_block, end_block, nr_blocks;
	u64 p_block, offset;
	u32 cluster, p_cluster, nr_clusters;
	struct super_block *sb = inode->i_sb;
	u64 end = ocfs2_align_bytes_to_clusters(sb, start);

	if (start + len < end)
		end = start + len;

	start_block = ocfs2_blocks_for_bytes(sb, start);
	end_block = ocfs2_blocks_for_bytes(sb, end);
	nr_blocks = end_block - start_block;

	cluster = ocfs2_bytes_to_clusters(sb, start);
	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
				 &nr_clusters, NULL);

	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}
static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend;
	u64 end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;
	loff_t isize = i_size_read(inode);

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	/* No page cache for EOF blocks, issue zero out to disk. */
	/*
	 * zeroout eof blocks in last cluster starting from
	 * "isize" even "start" > "isize" because it is
	 * complicated to zeroout just at "start" as "start"
	 * may be not aligned with block size, buffer write
	 * would be required to do that, but out of eof buffer
	 * write is not supported.
	 */
	ret = ocfs2_zeroout_partial_cluster(inode, isize,
					    end - isize);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	}

	/*
	 * If start is on a cluster boundary and end is somewhere in another
	 * cluster, we have not COWed the cluster starting at start, unless
	 * end is also within the same cluster. So, in this case, we skip this
	 * first call to ocfs2_zero_range_for_truncate() truncate and move on
	 * to the next one.
	 */
	if ((start & (csize - 1)) != 0) {
		/*
		 * We want to get the byte offset of the end of the 1st
		 * cluster.
		 */
		tmpend = (u64)osb->s_clustersize +
			(start & ~(osb->s_clustersize - 1));

		trace_ocfs2_zero_partial_clusters_range1(
			(unsigned long long)start,
			(unsigned long long)tmpend);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
						    tmpend);

		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);
	}

	trace_ocfs2_zero_partial_clusters_range2(
		(unsigned long long)start, (unsigned long long)end);

	ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);

	ocfs2_update_inode_fsync_trans(handle, inode, 1);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}
static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}
/*
 * Helper to calculate the punching pos and length in one run, we handle the
 * following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * It may have two following possibilities:
		 *
		 * - last record has been removed
		 * - trunc_start was within a hole
		 *
		 * both two cases mean the completion of hole punching.
		 */
		*done = 1;
	}
}
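
/*
 * ocfs2_calc_trunc_pos() is the per-record step of hole punching: it
 * either consumes a whole extent record, trims the tail of the record
 * containing trunc_start, or reports via *done that nothing is left to
 * remove.
 */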
int ocfs2_remove_inode_range(struct inode *inode,
			     struct buffer_head *di_bh, u64 byte_start,
			     u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);

		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW 2 clusters which might be
	 * partially zero'd later, if hole's start and end offset were
	 * within one cluster(means is not exactly aligned to clustersize).
	 */
	if (ocfs2_is_refcount_inode(inode)) {
		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);

	path = ocfs2_new_path_from_et(&et);

	while (trunc_end > trunc_start) {
		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for previous extent block is
			 * always one cluster less than actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;
		}

		rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc, false);

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}
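
/*
 * Hole punching above works right-to-left: starting from the cluster
 * after the hole, each leaf is searched with ocfs2_find_rec(), the
 * matching record is trimmed or removed via ocfs2_remove_btree_range(),
 * and freed clusters are batched in the dealloc context which is run
 * once the tree walk finishes.
 */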
/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);

	ret = ocfs2_inode_lock(inode, &di_bh, 1);

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}

	orig_isize = i_size_read(inode);
	/* zeroout eof blocks in the cluster. */
	if (!ret && change_size && orig_isize < size) {
		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
						    size - orig_isize);
		if (!ret)
			i_size_write(inode, size);
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_inode_unlock;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);

	if (file && (file->f_flags & O_SYNC))
		handle->h_sync = 1;

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	ocfs2_inode_unlock(inode, 1);

	ocfs2_rw_unlock(inode, 1);

	inode_unlock(inode);
	return ret;
}
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret = 0;

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	ret = mnt_want_write_file(file);

	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
	mnt_drop_write_file(file);

	return ret;
}
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;
	int cmd = OCFS2_IOC_RESVSP64;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE)
		change_size = 0;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = OCFS2_IOC_UNRESVSP64;

	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
					 change_size);
}
int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !ocfs2_is_refcount_inode(inode) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}

	return ret;
}
static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
	int blockmask = inode->i_sb->s_blocksize - 1;
	loff_t final_size = pos + count;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;
	return 0;
}
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
					    struct buffer_head **di_bh,
					    int meta_level,
					    int write_sem,
					    int wait)
{
	int ret = 0;

	if (wait)
		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
	else
		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);

	if (wait) {
		if (write_sem)
			down_write(&OCFS2_I(inode)->ip_alloc_sem);
		else
			down_read(&OCFS2_I(inode)->ip_alloc_sem);
	} else {
		if (write_sem)
			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
		else
			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
	}

	return ret;

out_unlock:
	ocfs2_inode_unlock(inode, meta_level);
	return ret;
}
static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
					       struct buffer_head **di_bh,
					       int meta_level,
					       int write_sem)
{
	if (write_sem)
		up_write(&OCFS2_I(inode)->ip_alloc_sem);
	else
		up_read(&OCFS2_I(inode)->ip_alloc_sem);

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);
}
static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t pos, size_t count, int wait)
{
	int ret = 0, meta_level = 0, overwrite_io = 0;
	u32 cpos, clusters;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	struct buffer_head *di_bh = NULL;

	/*
	 * We start with a read level meta lock and only jump to an ex
	 * if we need to make modifications here.
	 */
	ret = ocfs2_inode_lock_for_extent_tree(inode, &di_bh, meta_level,
					       overwrite_io, wait);

	/*
	 * Check if IO will overwrite allocated blocks in case
	 * IOCB_NOWAIT flag is set.
	 */
	if (!wait && !overwrite_io) {
		overwrite_io = 1;

		ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
	}

	/* Clear suid / sgid if necessary. We do this here
	 * instead of later in the write path because
	 * remove_suid() calls ->setattr without any hint that
	 * we may have already done our cluster locking. Since
	 * ocfs2_setattr() *must* take cluster locks to
	 * proceed, this will lead us to recursively lock the
	 * inode. There's also the dinode i_size state which
	 * can be lost via setattr during extending writes (we
	 * set inode->i_size at the end of a write. */
	if (should_remove_suid(dentry)) {
		if (meta_level == 0) {
			ocfs2_inode_unlock_for_extent_tree(inode, &di_bh,
							   meta_level,
							   overwrite_io);
		}

		ret = ocfs2_write_remove_suid(inode);
	}

	ret = ocfs2_check_range_for_refcount(inode, pos, count);
	if (ret == 1) {
		ocfs2_inode_unlock_for_extent_tree(inode, &di_bh,
						   meta_level, overwrite_io);

		ret = ocfs2_inode_lock_for_extent_tree(inode, &di_bh,
						       meta_level,
						       overwrite_io, wait);

		cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
		clusters =
			ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
		ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
	}

	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    pos, count, wait);

	ocfs2_inode_unlock_for_extent_tree(inode, &di_bh,
					   meta_level, overwrite_io);

	return ret;
}
static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	int rw_level;
	ssize_t written = 0;
	ssize_t ret;
	size_t count = iov_iter_count(from);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);
	void *saved_ki_complete = NULL;
	int append_write = ((iocb->ki_pos + count) >=
			    i_size_read(inode) ? 1 : 0);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
				    (unsigned long long)OCFS2_I(inode)->ip_blkno,
				    file->f_path.dentry->d_name.len,
				    file->f_path.dentry->d_name.name,
				    (unsigned int)from->nr_segs);	/* GRRRRR */

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	if (nowait) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else
		inode_lock(inode);

	/*
	 * Concurrent O_DIRECT writes are allowed with
	 * mount_option "coherency=buffered".
	 * For append write, we must take rw EX.
	 */
	rw_level = (!direct_io || full_coherency || append_write);

	if (nowait)
		ret = ocfs2_try_rw_lock(inode, rw_level);
	else
		ret = ocfs2_rw_lock(inode, rw_level);

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches.  Buffered I/O
		 * already does this in write_begin().
		 */
		if (nowait)
			ret = ocfs2_try_inode_lock(inode, NULL, 1);
		else
			ret = ocfs2_inode_lock(inode, NULL, 1);

		ocfs2_inode_unlock(inode, 1);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	count = ret;

	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);

	if (direct_io && !is_sync_kiocb(iocb) &&
	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
		/*
		 * Make it a sync io if it's an unaligned aio.
		 */
		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
	}

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	written = __generic_file_write_iter(iocb, from);
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(written == -EIOCBQUEUED && !direct_io);

	/*
	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and others
	 * that don't.  so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
	}

	if (unlikely(written <= 0))
		goto out;

	if (((file->f_flags & O_DSYNC) && !direct_io) ||
	    IS_SYNC(inode)) {
		ret = filemap_fdatawrite_range(file->f_mapping,
					       iocb->ki_pos - written,
					       iocb->ki_pos - 1);

		if (!ret)
			ret = jbd2_journal_force_commit(osb->journal->j_journal);

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping,
						      iocb->ki_pos - written,
						      iocb->ki_pos - 1);
	}

out:
	if (saved_ki_complete)
		xchg(&iocb->ki_complete, saved_ki_complete);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	inode_unlock(inode);

	if (written)
		ret = written;
	return ret;
}
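
/*
 * rw_level selection above: buffered writes, "coherency=full" O_DIRECT
 * writes and appending writes take the rw lock EX (1); only
 * non-appending O_DIRECT writes under "coherency=buffered" may share it
 * (0), which is what allows concurrent direct writers on one node.
 */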
static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
				    struct iov_iter *to)
{
	int ret = 0, rw_level = -1, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = file_inode(filp);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
				   (unsigned long long)OCFS2_I(inode)->ip_blkno,
				   filp->f_path.dentry->d_name.len,
				   filp->f_path.dentry->d_name.name,
				   to->nr_segs);	/* GRRRRR */

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	/*
	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (direct_io) {
		if (nowait)
			ret = ocfs2_try_rw_lock(inode, 0);
		else
			ret = ocfs2_rw_lock(inode, 0);

		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This allows the checks down below
	 * generic_file_read_iter() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
				     !nowait);

	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_read_iter(iocb, to);
	trace_generic_file_read_iter_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !direct_io);

	/* see ocfs2_file_write_iter */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
	}

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}
/* Refer generic_file_llseek_unlocked() */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret = 0;

	inode_lock(inode);

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_END:
		/* SEEK_END requires the OCFS2 inode lock for the file
		 * because it references the file's size.
		 */
		ret = ocfs2_inode_lock(inode, NULL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		offset += i_size_read(inode);
		ocfs2_inode_unlock(inode, 0);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
	case SEEK_HOLE:
		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	if (ret)
		return ret;
	return offset;
}
static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
				     struct file *file_out, loff_t pos_out,
				     loff_t len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
	struct buffer_head *in_bh = NULL, *out_bh = NULL;
	bool same_inode = (inode_in == inode_out);
	loff_t remapped = 0;
	ssize_t ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;
	if (!ocfs2_refcount_tree(osb))
		return -EOPNOTSUPP;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* Lock both files against IO */
	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	/* Lock out changes to the allocation maps and remap. */
	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
				  SINGLE_DEPTH_NESTING);

	/* Zap any page cache for the destination file's range. */
	truncate_inode_pages_range(&inode_out->i_data,
				   round_down(pos_out, PAGE_SIZE),
				   round_up(pos_out + len, PAGE_SIZE) - 1);

	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
					      inode_out, out_bh, pos_out, len);
	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);

	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode_in, 0);
	ocfs2_extent_map_trunc(inode_out, 0);

	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);

out_unlock:
	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
	return remapped > 0 ? remapped : ret;
}
const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.listxattr	= ocfs2_listxattr,
	.fiemap		= ocfs2_fiemap,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
	.fileattr_get	= ocfs2_fileattr_get,
	.fileattr_set	= ocfs2_fileattr_set,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= ocfs2_file_llseek,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * flock locking, too.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};