/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

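/*
 * Per-descriptor state: every open file or directory gets an
 * ocfs2_file_private carrying the cluster lock resource that backs
 * flock() on this descriptor.
 */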
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->fp_file = file;
	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (fp) {
		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
		ocfs2_lock_res_free(&fp->fp_flock);
		kfree(fp);
		file->private_data = NULL;
	}
}

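/*
 * Open checks, under ip_lock, that another node hasn't already wiped
 * the inode from disk, then bumps ip_open_count; the count lets
 * ocfs2_file_release() know when the last local opener goes away.
 */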
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)OCFS2_I(inode)->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE) {
		status = dquot_initialize(inode);
		if (status)
			goto leave;
	}

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

leave:
	return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 oi->ip_blkno,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name,
				 oi->ip_open_count);
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}

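/*
 * fsync/fdatasync: flush the dirty pagecache range, then wait for the
 * jbd2 transaction that last touched this inode to commit, issuing an
 * explicit disk cache flush if the journal won't send one itself.
 */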
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      OCFS2_I(inode)->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier) {
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		if (!err)
			err = ret;
	}

	if (err)
		mlog_errno(err);

	return (err < 0) ? -EIO : 0;
}

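/*
 * Replicates the VFS atime rules (noatime, nodiratime, relatime) by
 * hand because ocfs2 batches atime updates itself: an update is only
 * worth a disk write once the stored atime is more than
 * s_atime_quantum seconds stale.
 */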
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = current_time(inode);
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}

int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = current_time(inode);
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}

int ocfs2_set_inode_size(handle_t *handle,
			 struct inode *inode,
			 struct buffer_head *fe_bh,
			 u64 new_i_size)
{
	int status;

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	return status;
}

int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is aligned to the range of the cluster, there is
	 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
	 * CoW either.
	 */
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		return 0;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);

out:
	return status;
}

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster containing the offset if it is
	 * reflinked, since we will call ocfs2_zero_range_for_truncate
	 * later, which will write zeros from the offset to the end of
	 * the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}

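/*
 * Truncate proper. The page cache is shot down first (cluster locking
 * has already forced other nodes to drop their pages), inline files
 * are shrunk directly in the dinode, and everything else goes through
 * the two-phase orphan-then-commit truncate so that recovery can
 * finish the job after a crash.
 */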
int ocfs2_truncate_file(struct inode *inode,
			struct buffer_head *di_bh,
			u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)le64_to_cpu(fe->i_size),
				  (unsigned long long)new_i_size);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		trace_ocfs2_truncate_file_error(
			(unsigned long long)le64_to_cpu(fe->i_size),
			(unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * anyway.
	 */
	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		if (status)
			mlog_errno(status);

		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	return status;
}

/*
 * Extend file allocation only here.
 * We'll update all the on-disk metadata, and OCFS2_I(inode)->ip_clusters.
 *
 * Expects everything to be locked, a transaction started and enough
 * data / metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason_ret will always be set, even on error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}

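/*
 * Core extend loop: reserve allocators, start a transaction and add
 * clusters to the extent btree. A transaction that runs short of
 * credits is extended and retried (RESTART_TRANS), while exhausted
 * metadata reservations force a full restart with fresh allocators
 * (RESTART_META).
 */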
static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				     u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why = RESTART_NONE;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;
	int did_quota = 0;

	/*
	 * Unwritten extents only exist on file systems which
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	trace_ocfs2_extend_allocation(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)i_size_read(inode),
		le32_to_cpu(fe->i_clusters), clusters_to_add,
		why, restart_func);

	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (status)
		goto leave;
	did_quota = 1;

	/* Reserve a write to the file entry early on - so that if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb,
				      inode,
				      &logical_start,
				      clusters_to_add,
				      mark_unwritten,
				      bh,
				      handle,
				      data_ac,
				      meta_ac,
				      &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);
	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	did_quota = 0;

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			restart_func = 1;
			status = 0;
		} else {
			BUG_ON(why != RESTART_TRANS);

			status = ocfs2_allocate_extend_trans(handle, 1);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     OCFS2_I(inode)->ip_clusters,
	     (unsigned long long)i_size_read(inode));

leave:
	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	brelse(bh);
	bh = NULL;

	return status;
}

int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
			    u32 clusters_to_add, int mark_unwritten)
{
	return __ocfs2_extend_allocation(inode, logical_start,
					 clusters_to_add, mark_unwritten);
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
						      struct buffer_head *di_bh)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_jbd2_file_inode(handle, inode);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		mlog_errno(ret);
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to, struct buffer_head *di_bh)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_SHIFT;
	handle_t *handle;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit_trans;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_SIZE - 1);
	zero_to = abs_to & (PAGE_SIZE - 1);
	if (!zero_to)
		zero_to = PAGE_SIZE;

	trace_ocfs2_write_zero_page(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)abs_from,
			(unsigned long long)abs_to,
			index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + i_blocksize(inode);

		/*
		 * block_start is block-aligned. Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}


		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
		if (ret < 0)
			mlog_errno(ret);
		else
			ret = 0;
	}

	/*
	 * fs-writeback will release dirty pages without holding the
	 * page lock when their offsets are beyond the inode size; the
	 * release happens in block_write_full_page().
	 */
	i_size_write(inode, abs_to);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = current_time(inode);
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	di->i_mtime_nsec = di->i_ctime_nsec;
	if (handle) {
		ocfs2_journal_dirty(handle, di_bh);
		ocfs2_update_inode_fsync_trans(handle, inode, 1);
	}

out_unlock:
	unlock_page(page);
	put_page(page);
out_commit_trans:
	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}

/*
 * Find the next range to zero. We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache. We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()s current idea of what
 * needs to be zeroed. range_start and range_end return the next zeroing
 * range. A subsequent call should pass the previous range_end as its
 * zero_start. If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over. Refcounted extents are CoW'd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end, struct buffer_head *di_bh)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	trace_ocfs2_zero_extend_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)range_start,
			(unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
		if (rc < 0) {
			mlog_errno(rc);
			break;
		}
		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}

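/*
 * Zero the bytes between the current i_size and zero_to_size, one
 * allocated-and-written range at a time. Holes and unwritten extents
 * already read back as zeros, so they are skipped.
 */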
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
				(unsigned long long)zero_start,
				(unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start,
						  &range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		if (!range_end)
			break;
		/* Trim the ends */
		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end, di_bh);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		zero_start = range_end;
	}

	return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

	if (clusters_to_add) {
		ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
						clusters_to_add, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	BUG_ON(!di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here. We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inode's
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

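/*
 * setattr entry point. Size changes take the rw lock plus the cluster
 * inode lock, uid/gid changes may transfer quota, and a mode change
 * finishes with an ACL update once the dinode is marked dirty.
 */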
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
	int status = 0, size_change;
	int inode_locked = 0;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;
	int had_lock;
	struct ocfs2_lock_holder oh;

	trace_ocfs2_setattr(inode, dentry,
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    dentry->d_name.len, dentry->d_name.name,
			    attr->ia_valid, attr->ia_mode,
			    from_kuid(&init_user_ns, attr->ia_uid),
			    from_kgid(&init_user_ns, attr->ia_gid));

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
		return 0;

	status = setattr_prepare(dentry, attr);
	if (status)
		return status;

	if (is_quota_modification(inode, attr)) {
		status = dquot_initialize(inode);
		if (status)
			return status;
	}
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
	if (had_lock < 0) {
		status = had_lock;
		goto bail_unlock_rw;
	} else if (had_lock) {
		/*
		 * As far as we know, ocfs2_setattr() can only be the first
		 * VFS entry point in a call chain that hits the recursive
		 * cluster locking issue.
		 *
		 * For instance:
		 * chmod_common()
		 *  notify_change()
		 *   ocfs2_setattr()
		 *    posix_acl_chmod()
		 *     ocfs2_iop_get_acl()
		 *
		 * But, we're not 100% sure if it's always true, because the
		 * ordering of the VFS entry points in the call chain is out
		 * of our control. So, we'd better dump the stack here to
		 * catch the other cases of recursive locking.
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}
	inode_locked = 1;

	if (size_change) {
		status = inode_newsize_ok(inode, attr->ia_size);
		if (status)
			goto bail_unlock;

		inode_dio_wait(inode);

		if (i_size_read(inode) >= attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								attr->ia_size);
				if (status)
					goto bail_unlock;
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
			if (IS_ERR(transfer_to[USRQUOTA])) {
				status = PTR_ERR(transfer_to[USRQUOTA]);
				goto bail_unlock;
			}
		}
		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
			if (IS_ERR(transfer_to[GRPQUOTA])) {
				status = PTR_ERR(transfer_to[GRPQUOTA]);
				goto bail_unlock;
			}
		}
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock;
		}
		status = __dquot_transfer(inode, transfer_to);
		if (status < 0)
			goto bail_commit;
	} else {
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock;
		}
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock:
	if (status && inode_locked) {
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
		inode_locked = 0;
	}
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode, bh);
		if (status < 0)
			mlog_errno(status);
	}
	if (inode_locked)
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);

	brelse(bh);
	return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
		  struct dentry *dentry,
		  struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = dentry->d_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	err = ocfs2_inode_revalidate(dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(inode, stat);
	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		stat->blocks += (stat->size + 511)>>9;

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	return err;
}

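/*
 * Permission checks take a read-level cluster lock (via the tracker,
 * to tolerate the recursive-locking case described in ocfs2_setattr())
 * so that generic_permission() sees cluster-current inode metadata.
 */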
int ocfs2_permission(struct inode *inode, int mask)
{
	int ret, had_lock;
	struct ocfs2_lock_holder oh;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
	if (had_lock < 0) {
		ret = had_lock;
		goto out;
	} else if (had_lock) {
		/* See comments in ocfs2_setattr() for details.
		 * The call chain of this case could be:
		 * do_sys_open()
		 *  may_open()
		 *   inode_permission()
		 *    ocfs2_permission()
		 *     ocfs2_iop_get_acl()
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}

	ret = generic_permission(inode, mask);

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
	return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	trace_ocfs2_write_remove_suid(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, bh);

out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_write_remove_suid(inode, bh);
out:
	brelse(bh);
	return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:

	brelse(di_bh);
	return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}

static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend = 0;
	u64 end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * If start is on a cluster boundary and end is somewhere in another
	 * cluster, we have not COWed the cluster starting at start, unless
	 * end is also within the same cluster. So, in this case, we skip this
	 * first call to ocfs2_zero_range_for_truncate() and move on
	 * to the next one.
	 */
	if ((start & (csize - 1)) != 0) {
		/*
		 * We want to get the byte offset of the end of the 1st
		 * cluster.
		 */
		tmpend = (u64)osb->s_clustersize +
			(start & ~(osb->s_clustersize - 1));
		if (tmpend > end)
			tmpend = end;

		trace_ocfs2_zero_partial_clusters_range1(
			(unsigned long long)start,
			(unsigned long long)tmpend);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
						    tmpend);
		if (ret)
			mlog_errno(ret);
	}

	if (tmpend < end) {
		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);

		trace_ocfs2_zero_partial_clusters_range2(
			(unsigned long long)start, (unsigned long long)end);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

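/*
 * Scan a leaf's record list backwards for the last extent record that
 * starts below pos. Returns its index, or -1 if every record in the
 * leaf starts at or beyond pos.
 */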
static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}

/*
 * Helper to calculate the punching pos and length in one run, we handle the
 * following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		/*
		 * skip hole if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * There are two possibilities here:
		 *
		 * - the last record has been removed
		 * - trunc_start was within a hole
		 *
		 * Either case means hole punching is complete.
		 */
		ret = 1;
	}

	*done = ret;
}

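/*
 * Hole punch. Partial clusters at either edge of the range are zeroed
 * in place; the extent tree is then walked from the right end of the
 * hole leftwards, removing whole-cluster ranges from the btree.
 */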
int ocfs2_remove_inode_range(struct inode *inode,
			     struct buffer_head *di_bh, u64 byte_start,
			     u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW 2 clusters which might be
	 * partially zeroed later, if the hole's start and end offsets
	 * fall within one cluster (i.e. are not exactly aligned to the
	 * cluster size).
	 */

	if (ocfs2_is_refcount_inode(inode)) {
		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {

		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for previous extent block is
			 * always one cluster less than actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc, false);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	if (change_size && i_size_read(inode) < size)
		i_size_write(inode, size);

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (file && (file->f_flags & O_SYNC))
		handle->h_sync = 1;

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	inode_unlock(inode);
	return ret;
}

int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret;

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
	mnt_drop_write_file(file);
	return ret;
}

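/*
 * fallocate() is implemented on top of the space reservation ioctls
 * above: plain preallocation maps to OCFS2_IOC_RESVSP64, and
 * FALLOC_FL_PUNCH_HOLE to OCFS2_IOC_UNRESVSP64, with
 * FALLOC_FL_KEEP_SIZE controlling whether i_size may grow.
 */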
2033 static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2034 loff_t len)
2035 {
2036 struct inode *inode = file_inode(file);
2037 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2038 struct ocfs2_space_resv sr;
2039 int change_size = 1;
2040 int cmd = OCFS2_IOC_RESVSP64;
2041
2042 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2043 return -EOPNOTSUPP;
2044 if (!ocfs2_writes_unwritten_extents(osb))
2045 return -EOPNOTSUPP;
2046
2047 if (mode & FALLOC_FL_KEEP_SIZE)
2048 change_size = 0;
2049
2050 if (mode & FALLOC_FL_PUNCH_HOLE)
2051 cmd = OCFS2_IOC_UNRESVSP64;
2052
2053 sr.l_whence = 0;
2054 sr.l_start = (s64)offset;
2055 sr.l_len = (s64)len;
2056
2057 return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2058 change_size);
2059 }
2060
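/*
 * Walk the clusters backing [pos, pos + count) and return 1 if any
 * allocated extent in the range carries OCFS2_EXT_REFCOUNTED (i.e. is
 * shared via reflink), 0 if none does, or a negative errno on lookup
 * failure. Filesystems without refcount trees, non-refcounted inodes
 * and inline-data inodes trivially return 0.
 */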
2061 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2062 size_t count)
2063 {
2064 int ret = 0;
2065 unsigned int extent_flags;
2066 u32 cpos, clusters, extent_len, phys_cpos;
2067 struct super_block *sb = inode->i_sb;
2068
2069 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2070 !ocfs2_is_refcount_inode(inode) ||
2071 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2072 return 0;
2073
2074 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2075 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2076
2077 while (clusters) {
2078 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2079 &extent_flags);
2080 if (ret < 0) {
2081 mlog_errno(ret);
2082 goto out;
2083 }
2084
2085 if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2086 ret = 1;
2087 break;
2088 }
2089
2090 if (extent_len > clusters)
2091 extent_len = clusters;
2092
2093 clusters -= extent_len;
2094 cpos += extent_len;
2095 }
2096 out:
2097 return ret;
2098 }
2099
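/*
 * Return 1 unless the I/O both starts and ends on a filesystem block
 * boundary. Unaligned async direct writes are downgraded to
 * synchronous completion in ocfs2_file_write_iter() below.
 */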
2100 static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2101 {
2102 int blockmask = inode->i_sb->s_blocksize - 1;
2103 loff_t final_size = pos + count;
2104
2105 if ((pos & blockmask) || (final_size & blockmask))
2106 return 1;
2107 return 0;
2108 }
2109
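/*
 * CoW the shared (refcounted) clusters covering the write range.
 * Takes the inode lock EX and reports the new lock level back through
 * *meta_level so the caller knows what to drop.
 */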
2110 static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
2111 struct file *file,
2112 loff_t pos, size_t count,
2113 int *meta_level)
2114 {
2115 int ret;
2116 struct buffer_head *di_bh = NULL;
2117 u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2118 u32 clusters =
2119 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2120
2121 ret = ocfs2_inode_lock(inode, &di_bh, 1);
2122 if (ret) {
2123 mlog_errno(ret);
2124 goto out;
2125 }
2126
2127 *meta_level = 1;
2128
2129 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2130 if (ret)
2131 mlog_errno(ret);
2132 out:
2133 brelse(di_bh);
2134 return ret;
2135 }
2136
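/*
 * All the cluster-locked preparation a write needs before data can
 * flow: strip setuid/setgid bits and CoW any refcounted extents in
 * the target range. Starts with a read-level meta lock and retries
 * at exclusive level when a modification turns out to be necessary.
 */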
2137 static int ocfs2_prepare_inode_for_write(struct file *file,
2138 loff_t pos,
2139 size_t count)
2140 {
2141 int ret = 0, meta_level = 0;
2142 struct dentry *dentry = file->f_path.dentry;
2143 struct inode *inode = d_inode(dentry);
2144 loff_t end;
2145
2146 /*
2147 * We start with a read level meta lock and only escalate to an
2148 * exclusive (EX) lock if we need to make modifications here.
2149 */
2150 for (;;) {
2151 ret = ocfs2_inode_lock(inode, NULL, meta_level);
2152 if (ret < 0) {
2153 meta_level = -1;
2154 mlog_errno(ret);
2155 goto out;
2156 }
2157
2158 /* Clear suid / sgid if necessary. We do this here
2159 * instead of later in the write path because
2160 * remove_suid() calls ->setattr without any hint that
2161 * we may have already done our cluster locking. Since
2162 * ocfs2_setattr() *must* take cluster locks to
2163 * proceed, this will lead us to recursively lock the
2164 * inode. There's also the dinode i_size state which
2165 * can be lost via setattr during extending writes (we
2166 * set inode->i_size at the end of a write). */
2167 if (should_remove_suid(dentry)) {
2168 if (meta_level == 0) {
2169 ocfs2_inode_unlock(inode, meta_level);
2170 meta_level = 1;
2171 continue;
2172 }
2173
2174 ret = ocfs2_write_remove_suid(inode);
2175 if (ret < 0) {
2176 mlog_errno(ret);
2177 goto out_unlock;
2178 }
2179 }
2180
2181 end = pos + count;
2182
2183 ret = ocfs2_check_range_for_refcount(inode, pos, count);
2184 if (ret == 1) {
2185 ocfs2_inode_unlock(inode, meta_level);
2186 meta_level = -1;
2187
2188 ret = ocfs2_prepare_inode_for_refcount(inode,
2189 file,
2190 pos,
2191 count,
2192 &meta_level);
2193 }
2194
2195 if (ret < 0) {
2196 mlog_errno(ret);
2197 goto out_unlock;
2198 }
2199
2200 break;
2201 }
2202
2203 out_unlock:
2204 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2205 pos, count);
2206
2207 if (meta_level >= 0)
2208 ocfs2_inode_unlock(inode, meta_level);
2209
2210 out:
2211 return ret;
2212 }
2213
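/*
 * ->write_iter() for ocfs2 files. Chooses the cluster rw lock level
 * (shared O_DIRECT writes are only safe with coherency=buffered;
 * append writes always take EX), prepares the inode, then defers to
 * __generic_file_write_iter(). O_DSYNC buffered writes additionally
 * flush the data range and force a journal commit before returning.
 */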
2214 static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2215 struct iov_iter *from)
2216 {
2217 int direct_io, rw_level;
2218 ssize_t written = 0;
2219 ssize_t ret;
2220 size_t count = iov_iter_count(from);
2221 struct file *file = iocb->ki_filp;
2222 struct inode *inode = file_inode(file);
2223 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2224 int full_coherency = !(osb->s_mount_opt &
2225 OCFS2_MOUNT_COHERENCY_BUFFERED);
2226 void *saved_ki_complete = NULL;
2227 int append_write = ((iocb->ki_pos + count) >=
2228 i_size_read(inode) ? 1 : 0);
2229
2230 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2231 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2232 file->f_path.dentry->d_name.len,
2233 file->f_path.dentry->d_name.name,
2234 (unsigned int)from->nr_segs); /* GRRRRR */
2235
2236 if (count == 0)
2237 return 0;
2238
2239 direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2240
2241 inode_lock(inode);
2242
2243 /*
2244 * Concurrent O_DIRECT writes are allowed with the
2245 * mount option "coherency=buffered".
2246 * For append writes, we must take the rw lock EX.
2247 */
2248 rw_level = (!direct_io || full_coherency || append_write);
2249
2250 ret = ocfs2_rw_lock(inode, rw_level);
2251 if (ret < 0) {
2252 mlog_errno(ret);
2253 goto out_mutex;
2254 }
2255
2256 /*
2257 * O_DIRECT writes with "coherency=full" need to take EX cluster
2258 * inode_lock to guarantee coherency.
2259 */
2260 if (direct_io && full_coherency) {
2261 /*
2262 * We need to take and drop the inode lock to force
2263 * other nodes to drop their caches. Buffered I/O
2264 * already does this in write_begin().
2265 */
2266 ret = ocfs2_inode_lock(inode, NULL, 1);
2267 if (ret < 0) {
2268 mlog_errno(ret);
2269 goto out;
2270 }
2271
2272 ocfs2_inode_unlock(inode, 1);
2273 }
2274
2275 ret = generic_write_checks(iocb, from);
2276 if (ret <= 0) {
2277 if (ret)
2278 mlog_errno(ret);
2279 goto out;
2280 }
2281 count = ret;
2282
2283 ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count);
2284 if (ret < 0) {
2285 mlog_errno(ret);
2286 goto out;
2287 }
2288
2289 if (direct_io && !is_sync_kiocb(iocb) &&
2290 ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2291 /*
2292 * Make it a sync I/O if it's an unaligned AIO: clearing
 * ki_complete makes the direct I/O path wait for completion.
2293 */
2294 saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2295 }
2296
2297 /* communicate with ocfs2_dio_end_io */
2298 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2299
2300 written = __generic_file_write_iter(iocb, from);
2301 /* buffered aio wouldn't have proper lock coverage today */
2302 BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
2303
2304 /*
2305 * Deep in __generic_file_write_iter()->ocfs2_direct_IO we pass in an
2306 * ocfs2_dio_end_io function pointer, which is called when O_DIRECT
2307 * I/O completes so that it can unlock our rw lock.
2308 * Unfortunately there are error cases which call end_io and others
2309 * that don't, so we don't have to unlock the rw_lock if either an
2310 * async dio is going to do it in the future or an end_io after an
2311 * error has already done it.
2312 */
2313 if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2314 rw_level = -1;
2315 }
2316
2317 if (unlikely(written <= 0))
2318 goto out;
2319
2320 if (((file->f_flags & O_DSYNC) && !direct_io) ||
2321 IS_SYNC(inode)) {
2322 ret = filemap_fdatawrite_range(file->f_mapping,
2323 iocb->ki_pos - written,
2324 iocb->ki_pos - 1);
2325 if (ret < 0)
2326 written = ret;
2327
2328 if (!ret) {
2329 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2330 if (ret < 0)
2331 written = ret;
2332 }
2333
2334 if (!ret)
2335 ret = filemap_fdatawait_range(file->f_mapping,
2336 iocb->ki_pos - written,
2337 iocb->ki_pos - 1);
2338 }
2339
2340 out:
2341 if (saved_ki_complete)
2342 xchg(&iocb->ki_complete, saved_ki_complete);
2343
2344 if (rw_level != -1)
2345 ocfs2_rw_unlock(inode, rw_level);
2346
2347 out_mutex:
2348 inode_unlock(inode);
2349
2350 if (written)
2351 ret = written;
2352 return ret;
2353 }
2354
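/*
 * ->read_iter() for ocfs2 files. Buffered reads are protected page by
 * page in ->readpage(), so only O_DIRECT reads take the rw lock
 * (shared) to keep pending reads from racing a truncate on another
 * node. The meta lock is taken and dropped once to refresh i_size
 * and atime.
 */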
2355 static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2356 struct iov_iter *to)
2357 {
2358 int ret = 0, rw_level = -1, lock_level = 0;
2359 struct file *filp = iocb->ki_filp;
2360 struct inode *inode = file_inode(filp);
2361
2362 trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
2363 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2364 filp->f_path.dentry->d_name.len,
2365 filp->f_path.dentry->d_name.name,
2366 to->nr_segs); /* GRRRRR */
2367
2368
2369 if (!inode) {
2370 ret = -EINVAL;
2371 mlog_errno(ret);
2372 goto bail;
2373 }
2374
2375 /*
2376 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2377 * need locks to protect pending reads from racing with truncate.
2378 */
2379 if (iocb->ki_flags & IOCB_DIRECT) {
2380 ret = ocfs2_rw_lock(inode, 0);
2381 if (ret < 0) {
2382 mlog_errno(ret);
2383 goto bail;
2384 }
2385 rw_level = 0;
2386 /* communicate with ocfs2_dio_end_io */
2387 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2388 }
2389
2390 /*
2391 * We're fine letting folks race truncates and extending
2392 * writes with reads across the cluster, just like they can
2393 * locally. Hence no rw_lock during read.
2394 *
2395 * Take and drop the meta data lock to update inode fields
2396 * like i_size. This gives the checks down in
2397 * generic_file_read_iter() a chance of actually working.
2398 */
2399 ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
2400 if (ret < 0) {
2401 mlog_errno(ret);
2402 goto bail;
2403 }
2404 ocfs2_inode_unlock(inode, lock_level);
2405
2406 ret = generic_file_read_iter(iocb, to);
2407 trace_generic_file_aio_read_ret(ret);
2408
2409 /* buffered aio wouldn't have proper lock coverage today */
2410 BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
2411
2412 /* see ocfs2_file_write_iter */
2413 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2414 rw_level = -1;
2415 }
2416
2417 bail:
2418 if (rw_level != -1)
2419 ocfs2_rw_unlock(inode, rw_level);
2420
2421 return ret;
2422 }
2423
2424 /* See generic_file_llseek_unlocked() */
2425 static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2426 {
2427 struct inode *inode = file->f_mapping->host;
2428 int ret = 0;
2429
2430 inode_lock(inode);
2431
2432 switch (whence) {
2433 case SEEK_SET:
2434 break;
2435 case SEEK_END:
2436 /* SEEK_END requires the OCFS2 inode lock for the file
2437 * because it references the file's size.
2438 */
2439 ret = ocfs2_inode_lock(inode, NULL, 0);
2440 if (ret < 0) {
2441 mlog_errno(ret);
2442 goto out;
2443 }
2444 offset += i_size_read(inode);
2445 ocfs2_inode_unlock(inode, 0);
2446 break;
2447 case SEEK_CUR:
2448 if (offset == 0) {
2449 offset = file->f_pos;
2450 goto out;
2451 }
2452 offset += file->f_pos;
2453 break;
2454 case SEEK_DATA:
2455 case SEEK_HOLE:
2456 ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2457 if (ret)
2458 goto out;
2459 break;
2460 default:
2461 ret = -EINVAL;
2462 goto out;
2463 }
2464
2465 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2466
2467 out:
2468 inode_unlock(inode);
2469 if (ret)
2470 return ret;
2471 return offset;
2472 }
2473
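/*
 * ->clone_file_range() and ->dedupe_file_range() both funnel into
 * ocfs2_reflink_remap_range(); the final argument selects dedupe
 * semantics (compare before sharing) over a plain clone. On success,
 * dedupe reports the full requested length.
 */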
2474 static int ocfs2_file_clone_range(struct file *file_in,
2475 loff_t pos_in,
2476 struct file *file_out,
2477 loff_t pos_out,
2478 u64 len)
2479 {
2480 return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
2481 len, false);
2482 }
2483
2484 static ssize_t ocfs2_file_dedupe_range(struct file *src_file,
2485 u64 loff,
2486 u64 len,
2487 struct file *dst_file,
2488 u64 dst_loff)
2489 {
2490 int error;
2491
2492 error = ocfs2_reflink_remap_range(src_file, loff, dst_file, dst_loff,
2493 len, true);
2494 if (error)
2495 return error;
2496 return len;
2497 }
2498
2499 const struct inode_operations ocfs2_file_iops = {
2500 .setattr = ocfs2_setattr,
2501 .getattr = ocfs2_getattr,
2502 .permission = ocfs2_permission,
2503 .listxattr = ocfs2_listxattr,
2504 .fiemap = ocfs2_fiemap,
2505 .get_acl = ocfs2_iop_get_acl,
2506 .set_acl = ocfs2_iop_set_acl,
2507 };
2508
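/*
 * Special inodes (device nodes, FIFOs, sockets) use a reduced
 * operation set: no listxattr and no fiemap.
 */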
2509 const struct inode_operations ocfs2_special_file_iops = {
2510 .setattr = ocfs2_setattr,
2511 .getattr = ocfs2_getattr,
2512 .permission = ocfs2_permission,
2513 .get_acl = ocfs2_iop_get_acl,
2514 .set_acl = ocfs2_iop_set_acl,
2515 };
2516
2517 /*
2518 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2519 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2520 */
2521 const struct file_operations ocfs2_fops = {
2522 .llseek = ocfs2_file_llseek,
2523 .mmap = ocfs2_mmap,
2524 .fsync = ocfs2_sync_file,
2525 .release = ocfs2_file_release,
2526 .open = ocfs2_file_open,
2527 .read_iter = ocfs2_file_read_iter,
2528 .write_iter = ocfs2_file_write_iter,
2529 .unlocked_ioctl = ocfs2_ioctl,
2530 #ifdef CONFIG_COMPAT
2531 .compat_ioctl = ocfs2_compat_ioctl,
2532 #endif
2533 .lock = ocfs2_lock,
2534 .flock = ocfs2_flock,
2535 .splice_read = generic_file_splice_read,
2536 .splice_write = iter_file_splice_write,
2537 .fallocate = ocfs2_fallocate,
2538 .clone_file_range = ocfs2_file_clone_range,
2539 .dedupe_file_range = ocfs2_file_dedupe_range,
2540 };
2541
2542 const struct file_operations ocfs2_dops = {
2543 .llseek = generic_file_llseek,
2544 .read = generic_read_dir,
2545 .iterate = ocfs2_readdir,
2546 .fsync = ocfs2_sync_file,
2547 .release = ocfs2_dir_release,
2548 .open = ocfs2_dir_open,
2549 .unlocked_ioctl = ocfs2_ioctl,
2550 #ifdef CONFIG_COMPAT
2551 .compat_ioctl = ocfs2_compat_ioctl,
2552 #endif
2553 .lock = ocfs2_lock,
2554 .flock = ocfs2_flock,
2555 };
2556
2557 /*
2558 * POSIX-lockless variants of our file_operations.
2559 *
2560 * These will be used if the underlying cluster stack does not support
2561 * posix file locking, if the user passes the "localflocks" mount
2562 * option, or if we have a local-only fs.
2563 *
2564 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2565 * so we still want it in the case of no stack support for
2566 * plocks. Internally, it will do the right thing when asked to ignore
2567 * the cluster.
2568 */
2569 const struct file_operations ocfs2_fops_no_plocks = {
2570 .llseek = ocfs2_file_llseek,
2571 .mmap = ocfs2_mmap,
2572 .fsync = ocfs2_sync_file,
2573 .release = ocfs2_file_release,
2574 .open = ocfs2_file_open,
2575 .read_iter = ocfs2_file_read_iter,
2576 .write_iter = ocfs2_file_write_iter,
2577 .unlocked_ioctl = ocfs2_ioctl,
2578 #ifdef CONFIG_COMPAT
2579 .compat_ioctl = ocfs2_compat_ioctl,
2580 #endif
2581 .flock = ocfs2_flock,
2582 .splice_read = generic_file_splice_read,
2583 .splice_write = iter_file_splice_write,
2584 .fallocate = ocfs2_fallocate,
2585 .clone_file_range = ocfs2_file_clone_range,
2586 .dedupe_file_range = ocfs2_file_dedupe_range,
2587 };
2588
2589 const struct file_operations ocfs2_dops_no_plocks = {
2590 .llseek = generic_file_llseek,
2591 .read = generic_read_dir,
2592 .iterate = ocfs2_readdir,
2593 .fsync = ocfs2_sync_file,
2594 .release = ocfs2_dir_release,
2595 .open = ocfs2_dir_open,
2596 .unlocked_ioctl = ocfs2_ioctl,
2597 #ifdef CONFIG_COMPAT
2598 .compat_ioctl = ocfs2_compat_ioctl,
2599 #endif
2600 .flock = ocfs2_flock,
2601 };