1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * file.c
5 *
6 * File open, close, extend, truncate
7 *
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 */
25
26 #include <linux/capability.h>
27 #include <linux/fs.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37 #include <linux/falloc.h>
38
39 #define MLOG_MASK_PREFIX ML_INODE
40 #include <cluster/masklog.h>
41
42 #include "ocfs2.h"
43
44 #include "alloc.h"
45 #include "aops.h"
46 #include "dir.h"
47 #include "dlmglue.h"
48 #include "extent_map.h"
49 #include "file.h"
50 #include "sysfile.h"
51 #include "inode.h"
52 #include "ioctl.h"
53 #include "journal.h"
54 #include "locks.h"
55 #include "mmap.h"
56 #include "suballoc.h"
57 #include "super.h"
58
59 #include "buffer_head_io.h"
60
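/*
 * Start writeback of the inode's dirty pages, then sync the metadata
 * buffers associated with its mapping.
 */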
61 static int ocfs2_sync_inode(struct inode *inode)
62 {
63 filemap_fdatawrite(inode->i_mapping);
64 return sync_mapping_buffers(inode->i_mapping);
65 }
66
67 static int ocfs2_init_file_private(struct inode *inode, struct file *file)
68 {
69 struct ocfs2_file_private *fp;
70
71 fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
72 if (!fp)
73 return -ENOMEM;
74
75 fp->fp_file = file;
76 mutex_init(&fp->fp_mutex);
77 ocfs2_file_lock_res_init(&fp->fp_flock, fp);
78 file->private_data = fp;
79
80 return 0;
81 }
82
83 static void ocfs2_free_file_private(struct inode *inode, struct file *file)
84 {
85 struct ocfs2_file_private *fp = file->private_data;
86 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
87
88 if (fp) {
89 ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
90 ocfs2_lock_res_free(&fp->fp_flock);
91 kfree(fp);
92 file->private_data = NULL;
93 }
94 }
95
96 static int ocfs2_file_open(struct inode *inode, struct file *file)
97 {
98 int status;
99 int mode = file->f_flags;
100 struct ocfs2_inode_info *oi = OCFS2_I(inode);
101
102 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
103 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
104
105 spin_lock(&oi->ip_lock);
106
107 /* Check that the inode hasn't been wiped from disk by another
108 * node. If it hasn't then we're safe as long as we hold the
109 * spin lock until our increment of open count. */
110 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
111 spin_unlock(&oi->ip_lock);
112
113 status = -ENOENT;
114 goto leave;
115 }
116
117 if (mode & O_DIRECT)
118 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
119
120 oi->ip_open_count++;
121 spin_unlock(&oi->ip_lock);
122
123 status = ocfs2_init_file_private(inode, file);
124 if (status) {
125 /*
126 * We want to set open count back if we're failing the
127 * open.
128 */
129 spin_lock(&oi->ip_lock);
130 oi->ip_open_count--;
131 spin_unlock(&oi->ip_lock);
132 }
133
134 leave:
135 mlog_exit(status);
136 return status;
137 }
138
139 static int ocfs2_file_release(struct inode *inode, struct file *file)
140 {
141 struct ocfs2_inode_info *oi = OCFS2_I(inode);
142
143 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
144 file->f_path.dentry->d_name.len,
145 file->f_path.dentry->d_name.name);
146
147 spin_lock(&oi->ip_lock);
148 if (!--oi->ip_open_count)
149 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
150 spin_unlock(&oi->ip_lock);
151
152 ocfs2_free_file_private(inode, file);
153
154 mlog_exit(0);
155
156 return 0;
157 }
158
159 static int ocfs2_dir_open(struct inode *inode, struct file *file)
160 {
161 return ocfs2_init_file_private(inode, file);
162 }
163
164 static int ocfs2_dir_release(struct inode *inode, struct file *file)
165 {
166 ocfs2_free_file_private(inode, file);
167 return 0;
168 }
169
170 static int ocfs2_sync_file(struct file *file,
171 struct dentry *dentry,
172 int datasync)
173 {
174 int err = 0;
175 journal_t *journal;
176 struct inode *inode = dentry->d_inode;
177 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
178
179 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
180 dentry->d_name.len, dentry->d_name.name);
181
182 err = ocfs2_sync_inode(dentry->d_inode);
183 if (err)
184 goto bail;
185
186 journal = osb->journal->j_journal;
187 err = journal_force_commit(journal);
188
189 bail:
190 mlog_exit(err);
191
192 return (err < 0) ? -EIO : 0;
193 }
194
195 int ocfs2_should_update_atime(struct inode *inode,
196 struct vfsmount *vfsmnt)
197 {
198 struct timespec now;
199 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
200
201 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
202 return 0;
203
204 if ((inode->i_flags & S_NOATIME) ||
205 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
206 return 0;
207
208 /*
209 * We can be called with no vfsmnt structure - NFSD will
210 * sometimes do this.
211 *
212 * Note that our action here is different than touch_atime() -
213 * if we can't tell whether this is a noatime mount, then we
214 * don't know whether to trust the value of s_atime_quantum.
215 */
216 if (vfsmnt == NULL)
217 return 0;
218
219 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
220 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
221 return 0;
222
223 if (vfsmnt->mnt_flags & MNT_RELATIME) {
224 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
225 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
226 return 1;
227
228 return 0;
229 }
230
231 now = CURRENT_TIME;
232 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
233 return 0;
234 else
235 return 1;
236 }
237
238 int ocfs2_update_inode_atime(struct inode *inode,
239 struct buffer_head *bh)
240 {
241 int ret;
242 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
243 handle_t *handle;
244 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
245
246 mlog_entry_void();
247
248 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
249 if (handle == NULL) {
250 ret = -ENOMEM;
251 mlog_errno(ret);
252 goto out;
253 }
254
255 ret = ocfs2_journal_access(handle, inode, bh,
256 OCFS2_JOURNAL_ACCESS_WRITE);
257 if (ret) {
258 mlog_errno(ret);
259 goto out_commit;
260 }
261
262 /*
263 * Don't use ocfs2_mark_inode_dirty() here as we don't always
264 * have i_mutex to guard against concurrent changes to other
265 * inode fields.
266 */
267 inode->i_atime = CURRENT_TIME;
268 di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
269 di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
270
271 ret = ocfs2_journal_dirty(handle, bh);
272 if (ret < 0)
273 mlog_errno(ret);
274
275 out_commit:
276 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
277 out:
278 mlog_exit(ret);
279 return ret;
280 }
281
282 static int ocfs2_set_inode_size(handle_t *handle,
283 struct inode *inode,
284 struct buffer_head *fe_bh,
285 u64 new_i_size)
286 {
287 int status;
288
289 mlog_entry_void();
290 i_size_write(inode, new_i_size);
291 inode->i_blocks = ocfs2_inode_sector_count(inode);
292 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
293
294 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
295 if (status < 0) {
296 mlog_errno(status);
297 goto bail;
298 }
299
300 bail:
301 mlog_exit(status);
302 return status;
303 }
304
305 static int ocfs2_simple_size_update(struct inode *inode,
306 struct buffer_head *di_bh,
307 u64 new_i_size)
308 {
309 int ret;
310 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
311 handle_t *handle = NULL;
312
313 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
314 if (handle == NULL) {
315 ret = -ENOMEM;
316 mlog_errno(ret);
317 goto out;
318 }
319
320 ret = ocfs2_set_inode_size(handle, inode, di_bh,
321 new_i_size);
322 if (ret < 0)
323 mlog_errno(ret);
324
325 ocfs2_commit_trans(osb, handle);
326 out:
327 return ret;
328 }
329
330 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
331 struct inode *inode,
332 struct buffer_head *fe_bh,
333 u64 new_i_size)
334 {
335 int status;
336 handle_t *handle;
337 struct ocfs2_dinode *di;
338 u64 cluster_bytes;
339
340 mlog_entry_void();
341
342 /* TODO: This needs to actually orphan the inode in this
343 * transaction. */
344
345 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
346 if (IS_ERR(handle)) {
347 status = PTR_ERR(handle);
348 mlog_errno(status);
349 goto out;
350 }
351
352 status = ocfs2_journal_access(handle, inode, fe_bh,
353 OCFS2_JOURNAL_ACCESS_WRITE);
354 if (status < 0) {
355 mlog_errno(status);
356 goto out_commit;
357 }
358
359 /*
360 * Do this before setting i_size.
361 */
362 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
363 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
364 cluster_bytes);
365 if (status) {
366 mlog_errno(status);
367 goto out_commit;
368 }
369
370 i_size_write(inode, new_i_size);
371 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
372
373 di = (struct ocfs2_dinode *) fe_bh->b_data;
374 di->i_size = cpu_to_le64(new_i_size);
375 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
376 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
377
378 status = ocfs2_journal_dirty(handle, fe_bh);
379 if (status < 0)
380 mlog_errno(status);
381
382 out_commit:
383 ocfs2_commit_trans(osb, handle);
384 out:
385
386 mlog_exit(status);
387 return status;
388 }
389
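/*
 * Shrink a file to new_i_size: drop page cache beyond the new size,
 * handle inline-data inodes directly, and otherwise record the new
 * i_size on disk before running a full extent truncate.
 */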
390 static int ocfs2_truncate_file(struct inode *inode,
391 struct buffer_head *di_bh,
392 u64 new_i_size)
393 {
394 int status = 0;
395 struct ocfs2_dinode *fe = NULL;
396 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
397 struct ocfs2_truncate_context *tc = NULL;
398
399 	mlog_entry("(inode = %llu, new_i_size = %llu)\n",
400 (unsigned long long)OCFS2_I(inode)->ip_blkno,
401 (unsigned long long)new_i_size);
402
403 fe = (struct ocfs2_dinode *) di_bh->b_data;
404 if (!OCFS2_IS_VALID_DINODE(fe)) {
405 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
406 status = -EIO;
407 goto bail;
408 }
409
410 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
411 "Inode %llu, inode i_size = %lld != di "
412 "i_size = %llu, i_flags = 0x%x\n",
413 (unsigned long long)OCFS2_I(inode)->ip_blkno,
414 i_size_read(inode),
415 (unsigned long long)le64_to_cpu(fe->i_size),
416 le32_to_cpu(fe->i_flags));
417
418 if (new_i_size > le64_to_cpu(fe->i_size)) {
419 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
420 (unsigned long long)le64_to_cpu(fe->i_size),
421 (unsigned long long)new_i_size);
422 status = -EINVAL;
423 mlog_errno(status);
424 goto bail;
425 }
426
427 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
428 (unsigned long long)le64_to_cpu(fe->i_blkno),
429 (unsigned long long)le64_to_cpu(fe->i_size),
430 (unsigned long long)new_i_size);
431
432 	/* let's handle the simple truncate cases before doing any more
433 * cluster locking. */
434 if (new_i_size == le64_to_cpu(fe->i_size))
435 goto bail;
436
437 down_write(&OCFS2_I(inode)->ip_alloc_sem);
438
439 /*
440 * The inode lock forced other nodes to sync and drop their
441 * pages, which (correctly) happens even if we have a truncate
442 * without allocation change - ocfs2 cluster sizes can be much
443 * greater than page size, so we have to truncate them
444 * anyway.
445 */
446 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
447 truncate_inode_pages(inode->i_mapping, new_i_size);
448
449 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
450 status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
451 i_size_read(inode), 1);
452 if (status)
453 mlog_errno(status);
454
455 goto bail_unlock_sem;
456 }
457
458 /* alright, we're going to need to do a full blown alloc size
459 * change. Orphan the inode so that recovery can complete the
460 * truncate if necessary. This does the task of marking
461 * i_size. */
462 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
463 if (status < 0) {
464 mlog_errno(status);
465 goto bail_unlock_sem;
466 }
467
468 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
469 if (status < 0) {
470 mlog_errno(status);
471 goto bail_unlock_sem;
472 }
473
474 status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
475 if (status < 0) {
476 mlog_errno(status);
477 goto bail_unlock_sem;
478 }
479
480 /* TODO: orphan dir cleanup here. */
481 bail_unlock_sem:
482 up_write(&OCFS2_I(inode)->ip_alloc_sem);
483
484 bail:
485
486 mlog_exit(status);
487 return status;
488 }
489
490 /*
491 * extend allocation only here.
492 * we'll update all the disk stuff, and oip->alloc_size
493 *
494 * expect stuff to be locked, a transaction started and enough data /
495 * metadata reservations in the contexts.
496 *
497 * Will return -EAGAIN, and a reason if a restart is needed.
498 * If passed in, *reason will always be set, even in error.
499 */
500 int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
501 struct inode *inode,
502 u32 *logical_offset,
503 u32 clusters_to_add,
504 int mark_unwritten,
505 struct buffer_head *fe_bh,
506 handle_t *handle,
507 struct ocfs2_alloc_context *data_ac,
508 struct ocfs2_alloc_context *meta_ac,
509 enum ocfs2_alloc_restarted *reason_ret)
510 {
511 int status = 0;
512 int free_extents;
513 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
514 enum ocfs2_alloc_restarted reason = RESTART_NONE;
515 u32 bit_off, num_bits;
516 u64 block;
517 u8 flags = 0;
518
519 BUG_ON(!clusters_to_add);
520
521 if (mark_unwritten)
522 flags = OCFS2_EXT_UNWRITTEN;
523
524 free_extents = ocfs2_num_free_extents(osb, inode, fe_bh);
525 if (free_extents < 0) {
526 status = free_extents;
527 mlog_errno(status);
528 goto leave;
529 }
530
531 /* there are two cases which could cause us to EAGAIN in the
532 * we-need-more-metadata case:
533 * 1) we haven't reserved *any*
534 * 2) we are so fragmented, we've needed to add metadata too
535 * many times. */
536 if (!free_extents && !meta_ac) {
537 mlog(0, "we haven't reserved any metadata!\n");
538 status = -EAGAIN;
539 reason = RESTART_META;
540 goto leave;
541 } else if ((!free_extents)
542 && (ocfs2_alloc_context_bits_left(meta_ac)
543 < ocfs2_extend_meta_needed(&fe->id2.i_list))) {
544 mlog(0, "filesystem is really fragmented...\n");
545 status = -EAGAIN;
546 reason = RESTART_META;
547 goto leave;
548 }
549
550 status = __ocfs2_claim_clusters(osb, handle, data_ac, 1,
551 clusters_to_add, &bit_off, &num_bits);
552 if (status < 0) {
553 if (status != -ENOSPC)
554 mlog_errno(status);
555 goto leave;
556 }
557
558 BUG_ON(num_bits > clusters_to_add);
559
560 /* reserve our write early -- insert_extent may update the inode */
561 status = ocfs2_journal_access(handle, inode, fe_bh,
562 OCFS2_JOURNAL_ACCESS_WRITE);
563 if (status < 0) {
564 mlog_errno(status);
565 goto leave;
566 }
567
568 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
569 mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
570 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
571 status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
572 *logical_offset, block, num_bits,
573 flags, meta_ac);
574 if (status < 0) {
575 mlog_errno(status);
576 goto leave;
577 }
578
579 status = ocfs2_journal_dirty(handle, fe_bh);
580 if (status < 0) {
581 mlog_errno(status);
582 goto leave;
583 }
584
585 clusters_to_add -= num_bits;
586 *logical_offset += num_bits;
587
588 if (clusters_to_add) {
589 mlog(0, "need to alloc once more, clusters = %u, wanted = "
590 "%u\n", fe->i_clusters, clusters_to_add);
591 status = -EAGAIN;
592 reason = RESTART_TRANS;
593 }
594
595 leave:
596 mlog_exit(status);
597 if (reason_ret)
598 *reason_ret = reason;
599 return status;
600 }
601
602 /*
603 * For a given allocation, determine which allocators will need to be
604 * accessed, and lock them, reserving the appropriate number of bits.
605 *
606 * Sparse file systems call this from ocfs2_write_begin_nolock()
607 * and ocfs2_allocate_unwritten_extents().
608 *
609 * File systems which don't support holes call this from
610 * ocfs2_extend_allocation().
611 */
612 int ocfs2_lock_allocators(struct inode *inode, struct buffer_head *di_bh,
613 u32 clusters_to_add, u32 extents_to_split,
614 struct ocfs2_alloc_context **data_ac,
615 struct ocfs2_alloc_context **meta_ac)
616 {
617 int ret = 0, num_free_extents;
618 unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
619 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
620 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
621
622 *meta_ac = NULL;
623 if (data_ac)
624 *data_ac = NULL;
625
626 BUG_ON(clusters_to_add != 0 && data_ac == NULL);
627
628 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
629 "clusters_to_add = %u, extents_to_split = %u\n",
630 (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode),
631 le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);
632
633 num_free_extents = ocfs2_num_free_extents(osb, inode, di_bh);
634 if (num_free_extents < 0) {
635 ret = num_free_extents;
636 mlog_errno(ret);
637 goto out;
638 }
639
640 /*
641 * Sparse allocation file systems need to be more conservative
642 * with reserving room for expansion - the actual allocation
643 * happens while we've got a journal handle open so re-taking
644 * a cluster lock (because we ran out of room for another
645 * extent) will violate ordering rules.
646 *
647 * Most of the time we'll only be seeing this 1 cluster at a time
648 * anyway.
649 *
650 * Always lock for any unwritten extents - we might want to
651 * add blocks during a split.
652 */
653 if (!num_free_extents ||
654 (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
655 ret = ocfs2_reserve_new_metadata(osb, &di->id2.i_list, meta_ac);
656 if (ret < 0) {
657 if (ret != -ENOSPC)
658 mlog_errno(ret);
659 goto out;
660 }
661 }
662
663 if (clusters_to_add == 0)
664 goto out;
665
666 ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
667 if (ret < 0) {
668 if (ret != -ENOSPC)
669 mlog_errno(ret);
670 goto out;
671 }
672
673 out:
674 if (ret) {
675 if (*meta_ac) {
676 ocfs2_free_alloc_context(*meta_ac);
677 *meta_ac = NULL;
678 }
679
680 /*
681 * We cannot have an error and a non null *data_ac.
682 */
683 }
684
685 return ret;
686 }
687
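/*
 * Add clusters_to_add clusters of allocation starting at logical_start,
 * restarting the transaction (or the whole function) whenever the
 * allocators tell us more reservations are needed.
 */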
688 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
689 u32 clusters_to_add, int mark_unwritten)
690 {
691 int status = 0;
692 int restart_func = 0;
693 int credits;
694 u32 prev_clusters;
695 struct buffer_head *bh = NULL;
696 struct ocfs2_dinode *fe = NULL;
697 handle_t *handle = NULL;
698 struct ocfs2_alloc_context *data_ac = NULL;
699 struct ocfs2_alloc_context *meta_ac = NULL;
700 enum ocfs2_alloc_restarted why;
701 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
702
703 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
704
705 /*
706 * This function only exists for file systems which don't
707 * support holes.
708 */
709 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
710
711 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
712 OCFS2_BH_CACHED, inode);
713 if (status < 0) {
714 mlog_errno(status);
715 goto leave;
716 }
717
718 fe = (struct ocfs2_dinode *) bh->b_data;
719 if (!OCFS2_IS_VALID_DINODE(fe)) {
720 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
721 status = -EIO;
722 goto leave;
723 }
724
725 restart_all:
726 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
727
728 status = ocfs2_lock_allocators(inode, bh, clusters_to_add, 0, &data_ac,
729 &meta_ac);
730 if (status) {
731 mlog_errno(status);
732 goto leave;
733 }
734
735 credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list,
736 clusters_to_add);
737 handle = ocfs2_start_trans(osb, credits);
738 if (IS_ERR(handle)) {
739 status = PTR_ERR(handle);
740 handle = NULL;
741 mlog_errno(status);
742 goto leave;
743 }
744
745 restarted_transaction:
746 	/* reserve a write to the file entry early on - that way if we
747 * run out of credits in the allocation path, we can still
748 * update i_size. */
749 status = ocfs2_journal_access(handle, inode, bh,
750 OCFS2_JOURNAL_ACCESS_WRITE);
751 if (status < 0) {
752 mlog_errno(status);
753 goto leave;
754 }
755
756 prev_clusters = OCFS2_I(inode)->ip_clusters;
757
758 status = ocfs2_do_extend_allocation(osb,
759 inode,
760 &logical_start,
761 clusters_to_add,
762 mark_unwritten,
763 bh,
764 handle,
765 data_ac,
766 meta_ac,
767 &why);
768 if ((status < 0) && (status != -EAGAIN)) {
769 if (status != -ENOSPC)
770 mlog_errno(status);
771 goto leave;
772 }
773
774 status = ocfs2_journal_dirty(handle, bh);
775 if (status < 0) {
776 mlog_errno(status);
777 goto leave;
778 }
779
780 spin_lock(&OCFS2_I(inode)->ip_lock);
781 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
782 spin_unlock(&OCFS2_I(inode)->ip_lock);
783
784 if (why != RESTART_NONE && clusters_to_add) {
785 if (why == RESTART_META) {
786 mlog(0, "restarting function.\n");
787 restart_func = 1;
788 } else {
789 BUG_ON(why != RESTART_TRANS);
790
791 mlog(0, "restarting transaction.\n");
792 /* TODO: This can be more intelligent. */
793 credits = ocfs2_calc_extend_credits(osb->sb,
794 &fe->id2.i_list,
795 clusters_to_add);
796 status = ocfs2_extend_trans(handle, credits);
797 if (status < 0) {
798 /* handle still has to be committed at
799 * this point. */
800 status = -ENOMEM;
801 mlog_errno(status);
802 goto leave;
803 }
804 goto restarted_transaction;
805 }
806 }
807
808 mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
809 le32_to_cpu(fe->i_clusters),
810 (unsigned long long)le64_to_cpu(fe->i_size));
811 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
812 OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode));
813
814 leave:
815 if (handle) {
816 ocfs2_commit_trans(osb, handle);
817 handle = NULL;
818 }
819 if (data_ac) {
820 ocfs2_free_alloc_context(data_ac);
821 data_ac = NULL;
822 }
823 if (meta_ac) {
824 ocfs2_free_alloc_context(meta_ac);
825 meta_ac = NULL;
826 }
827 if ((!status) && restart_func) {
828 restart_func = 0;
829 goto restart_all;
830 }
831 if (bh) {
832 brelse(bh);
833 bh = NULL;
834 }
835
836 mlog_exit(status);
837 return status;
838 }
839
840 /* Some parts of this taken from generic_cont_expand, which turned out
841 * to be too fragile to do exactly what we need without us having to
842 * worry about recursive locking in ->prepare_write() and
843 * ->commit_write(). */
844 static int ocfs2_write_zero_page(struct inode *inode,
845 u64 size)
846 {
847 struct address_space *mapping = inode->i_mapping;
848 struct page *page;
849 unsigned long index;
850 unsigned int offset;
851 handle_t *handle = NULL;
852 int ret;
853
854 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
855 /* ugh. in prepare/commit_write, if from==to==start of block, we
856 ** skip the prepare. make sure we never send an offset for the start
857 ** of a block
858 */
859 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
860 offset++;
861 }
862 index = size >> PAGE_CACHE_SHIFT;
863
864 page = grab_cache_page(mapping, index);
865 if (!page) {
866 ret = -ENOMEM;
867 mlog_errno(ret);
868 goto out;
869 }
870
871 ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
872 if (ret < 0) {
873 mlog_errno(ret);
874 goto out_unlock;
875 }
876
877 if (ocfs2_should_order_data(inode)) {
878 handle = ocfs2_start_walk_page_trans(inode, page, offset,
879 offset);
880 if (IS_ERR(handle)) {
881 ret = PTR_ERR(handle);
882 handle = NULL;
883 goto out_unlock;
884 }
885 }
886
887 /* must not update i_size! */
888 ret = block_commit_write(page, offset, offset);
889 if (ret < 0)
890 mlog_errno(ret);
891 else
892 ret = 0;
893
894 if (handle)
895 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
896 out_unlock:
897 unlock_page(page);
898 page_cache_release(page);
899 out:
900 return ret;
901 }
902
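/*
 * Zero the region between the current (block-aligned) i_size and
 * zero_to_size, one block-sized page write at a time.
 */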
903 static int ocfs2_zero_extend(struct inode *inode,
904 u64 zero_to_size)
905 {
906 int ret = 0;
907 u64 start_off;
908 struct super_block *sb = inode->i_sb;
909
910 start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
911 while (start_off < zero_to_size) {
912 ret = ocfs2_write_zero_page(inode, start_off);
913 if (ret < 0) {
914 mlog_errno(ret);
915 goto out;
916 }
917
918 start_off += sb->s_blocksize;
919
920 /*
921 * Very large extends have the potential to lock up
922 * the cpu for extended periods of time.
923 */
924 cond_resched();
925 }
926
927 out:
928 return ret;
929 }
930
931 int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
932 {
933 int ret;
934 u32 clusters_to_add;
935 struct ocfs2_inode_info *oi = OCFS2_I(inode);
936
937 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
938 if (clusters_to_add < oi->ip_clusters)
939 clusters_to_add = 0;
940 else
941 clusters_to_add -= oi->ip_clusters;
942
943 if (clusters_to_add) {
944 ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
945 clusters_to_add, 0);
946 if (ret) {
947 mlog_errno(ret);
948 goto out;
949 }
950 }
951
952 /*
953 * Call this even if we don't add any clusters to the tree. We
954 * still need to zero the area between the old i_size and the
955 * new i_size.
956 */
957 ret = ocfs2_zero_extend(inode, zero_to);
958 if (ret < 0)
959 mlog_errno(ret);
960
961 out:
962 return ret;
963 }
964
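/*
 * Grow a file to new_i_size: convert away from inline data once it no
 * longer fits, allocate and zero clusters on non-sparse file systems,
 * then record the new size.
 */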
965 static int ocfs2_extend_file(struct inode *inode,
966 struct buffer_head *di_bh,
967 u64 new_i_size)
968 {
969 int ret = 0;
970 struct ocfs2_inode_info *oi = OCFS2_I(inode);
971
972 BUG_ON(!di_bh);
973
974 /* setattr sometimes calls us like this. */
975 if (new_i_size == 0)
976 goto out;
977
978 if (i_size_read(inode) == new_i_size)
979 goto out;
980 BUG_ON(new_i_size < i_size_read(inode));
981
982 /*
983 * Fall through for converting inline data, even if the fs
984 * supports sparse files.
985 *
986 * The check for inline data here is legal - nobody can add
987 * the feature since we have i_mutex. We must check it again
988 * after acquiring ip_alloc_sem though, as paths like mmap
989 * might have raced us to converting the inode to extents.
990 */
991 if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
992 && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
993 goto out_update_size;
994
995 /*
996 * The alloc sem blocks people in read/write from reading our
997 * allocation until we're done changing it. We depend on
998 * i_mutex to block other extend/truncate calls while we're
999 * here.
1000 */
1001 down_write(&oi->ip_alloc_sem);
1002
1003 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1004 /*
1005 		 * We can optimize small extends by keeping the inode's
1006 * inline data.
1007 */
1008 if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1009 up_write(&oi->ip_alloc_sem);
1010 goto out_update_size;
1011 }
1012
1013 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1014 if (ret) {
1015 up_write(&oi->ip_alloc_sem);
1016
1017 mlog_errno(ret);
1018 goto out;
1019 }
1020 }
1021
1022 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1023 ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
1024
1025 up_write(&oi->ip_alloc_sem);
1026
1027 if (ret < 0) {
1028 mlog_errno(ret);
1029 goto out;
1030 }
1031
1032 out_update_size:
1033 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1034 if (ret < 0)
1035 mlog_errno(ret);
1036
1037 out:
1038 return ret;
1039 }
1040
1041 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1042 {
1043 int status = 0, size_change;
1044 struct inode *inode = dentry->d_inode;
1045 struct super_block *sb = inode->i_sb;
1046 struct ocfs2_super *osb = OCFS2_SB(sb);
1047 struct buffer_head *bh = NULL;
1048 handle_t *handle = NULL;
1049
1050 mlog_entry("(0x%p, '%.*s')\n", dentry,
1051 dentry->d_name.len, dentry->d_name.name);
1052
1053 /* ensuring we don't even attempt to truncate a symlink */
1054 if (S_ISLNK(inode->i_mode))
1055 attr->ia_valid &= ~ATTR_SIZE;
1056
1057 if (attr->ia_valid & ATTR_MODE)
1058 mlog(0, "mode change: %d\n", attr->ia_mode);
1059 if (attr->ia_valid & ATTR_UID)
1060 mlog(0, "uid change: %d\n", attr->ia_uid);
1061 if (attr->ia_valid & ATTR_GID)
1062 mlog(0, "gid change: %d\n", attr->ia_gid);
1063 if (attr->ia_valid & ATTR_SIZE)
1064 mlog(0, "size change...\n");
1065 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
1066 mlog(0, "time change...\n");
1067
1068 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1069 | ATTR_GID | ATTR_UID | ATTR_MODE)
1070 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
1071 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
1072 return 0;
1073 }
1074
1075 status = inode_change_ok(inode, attr);
1076 if (status)
1077 return status;
1078
1079 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1080 if (size_change) {
1081 status = ocfs2_rw_lock(inode, 1);
1082 if (status < 0) {
1083 mlog_errno(status);
1084 goto bail;
1085 }
1086 }
1087
1088 status = ocfs2_inode_lock(inode, &bh, 1);
1089 if (status < 0) {
1090 if (status != -ENOENT)
1091 mlog_errno(status);
1092 goto bail_unlock_rw;
1093 }
1094
1095 if (size_change && attr->ia_size != i_size_read(inode)) {
1096 if (attr->ia_size > sb->s_maxbytes) {
1097 status = -EFBIG;
1098 goto bail_unlock;
1099 }
1100
1101 if (i_size_read(inode) > attr->ia_size)
1102 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1103 else
1104 status = ocfs2_extend_file(inode, bh, attr->ia_size);
1105 if (status < 0) {
1106 if (status != -ENOSPC)
1107 mlog_errno(status);
1108 status = -ENOSPC;
1109 goto bail_unlock;
1110 }
1111 }
1112
1113 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1114 if (IS_ERR(handle)) {
1115 status = PTR_ERR(handle);
1116 mlog_errno(status);
1117 goto bail_unlock;
1118 }
1119
1120 /*
1121 * This will intentionally not wind up calling vmtruncate(),
1122 * since all the work for a size change has been done above.
1123 * Otherwise, we could get into problems with truncate as
1124 * ip_alloc_sem is used there to protect against i_size
1125 * changes.
1126 */
1127 status = inode_setattr(inode, attr);
1128 if (status < 0) {
1129 mlog_errno(status);
1130 goto bail_commit;
1131 }
1132
1133 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1134 if (status < 0)
1135 mlog_errno(status);
1136
1137 bail_commit:
1138 ocfs2_commit_trans(osb, handle);
1139 bail_unlock:
1140 ocfs2_inode_unlock(inode, 1);
1141 bail_unlock_rw:
1142 if (size_change)
1143 ocfs2_rw_unlock(inode, 1);
1144 bail:
1145 if (bh)
1146 brelse(bh);
1147
1148 mlog_exit(status);
1149 return status;
1150 }
1151
1152 int ocfs2_getattr(struct vfsmount *mnt,
1153 struct dentry *dentry,
1154 struct kstat *stat)
1155 {
1156 struct inode *inode = dentry->d_inode;
1157 struct super_block *sb = dentry->d_inode->i_sb;
1158 struct ocfs2_super *osb = sb->s_fs_info;
1159 int err;
1160
1161 mlog_entry_void();
1162
1163 err = ocfs2_inode_revalidate(dentry);
1164 if (err) {
1165 if (err != -ENOENT)
1166 mlog_errno(err);
1167 goto bail;
1168 }
1169
1170 generic_fillattr(inode, stat);
1171
1172 /* We set the blksize from the cluster size for performance */
1173 stat->blksize = osb->s_clustersize;
1174
1175 bail:
1176 mlog_exit(err);
1177
1178 return err;
1179 }
1180
1181 int ocfs2_permission(struct inode *inode, int mask)
1182 {
1183 int ret;
1184
1185 mlog_entry_void();
1186
1187 ret = ocfs2_inode_lock(inode, NULL, 0);
1188 if (ret) {
1189 if (ret != -ENOENT)
1190 mlog_errno(ret);
1191 goto out;
1192 }
1193
1194 ret = generic_permission(inode, mask, NULL);
1195
1196 ocfs2_inode_unlock(inode, 0);
1197 out:
1198 mlog_exit(ret);
1199 return ret;
1200 }
1201
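/*
 * Journal an i_mode update that clears the setuid bit (and the setgid
 * bit when group execute is set), mirroring the change into the
 * on-disk inode.
 */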
1202 static int __ocfs2_write_remove_suid(struct inode *inode,
1203 struct buffer_head *bh)
1204 {
1205 int ret;
1206 handle_t *handle;
1207 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1208 struct ocfs2_dinode *di;
1209
1210 mlog_entry("(Inode %llu, mode 0%o)\n",
1211 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
1212
1213 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1214 if (handle == NULL) {
1215 ret = -ENOMEM;
1216 mlog_errno(ret);
1217 goto out;
1218 }
1219
1220 ret = ocfs2_journal_access(handle, inode, bh,
1221 OCFS2_JOURNAL_ACCESS_WRITE);
1222 if (ret < 0) {
1223 mlog_errno(ret);
1224 goto out_trans;
1225 }
1226
1227 inode->i_mode &= ~S_ISUID;
1228 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1229 inode->i_mode &= ~S_ISGID;
1230
1231 di = (struct ocfs2_dinode *) bh->b_data;
1232 di->i_mode = cpu_to_le16(inode->i_mode);
1233
1234 ret = ocfs2_journal_dirty(handle, bh);
1235 if (ret < 0)
1236 mlog_errno(ret);
1237
1238 out_trans:
1239 ocfs2_commit_trans(osb, handle);
1240 out:
1241 mlog_exit(ret);
1242 return ret;
1243 }
1244
1245 /*
1246 * Will look for holes and unwritten extents in the range starting at
1247 * pos for count bytes (inclusive).
1248 */
1249 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1250 size_t count)
1251 {
1252 int ret = 0;
1253 unsigned int extent_flags;
1254 u32 cpos, clusters, extent_len, phys_cpos;
1255 struct super_block *sb = inode->i_sb;
1256
1257 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1258 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1259
1260 while (clusters) {
1261 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1262 &extent_flags);
1263 if (ret < 0) {
1264 mlog_errno(ret);
1265 goto out;
1266 }
1267
1268 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1269 ret = 1;
1270 break;
1271 }
1272
1273 if (extent_len > clusters)
1274 extent_len = clusters;
1275
1276 clusters -= extent_len;
1277 cpos += extent_len;
1278 }
1279 out:
1280 return ret;
1281 }
1282
1283 static int ocfs2_write_remove_suid(struct inode *inode)
1284 {
1285 int ret;
1286 struct buffer_head *bh = NULL;
1287 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1288
1289 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1290 oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
1291 if (ret < 0) {
1292 mlog_errno(ret);
1293 goto out;
1294 }
1295
1296 ret = __ocfs2_write_remove_suid(inode, bh);
1297 out:
1298 brelse(bh);
1299 return ret;
1300 }
1301
1302 /*
1303 * Allocate enough extents to cover the region starting at byte offset
1304 * start for len bytes. Existing extents are skipped, any extents
1305 * added are marked as "unwritten".
1306 */
1307 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1308 u64 start, u64 len)
1309 {
1310 int ret;
1311 u32 cpos, phys_cpos, clusters, alloc_size;
1312 u64 end = start + len;
1313 struct buffer_head *di_bh = NULL;
1314
1315 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1316 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1317 OCFS2_I(inode)->ip_blkno, &di_bh,
1318 OCFS2_BH_CACHED, inode);
1319 if (ret) {
1320 mlog_errno(ret);
1321 goto out;
1322 }
1323
1324 /*
1325 * Nothing to do if the requested reservation range
1326 * fits within the inode.
1327 */
1328 if (ocfs2_size_fits_inline_data(di_bh, end))
1329 goto out;
1330
1331 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1332 if (ret) {
1333 mlog_errno(ret);
1334 goto out;
1335 }
1336 }
1337
1338 /*
1339 * We consider both start and len to be inclusive.
1340 */
1341 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1342 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1343 clusters -= cpos;
1344
1345 while (clusters) {
1346 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1347 &alloc_size, NULL);
1348 if (ret) {
1349 mlog_errno(ret);
1350 goto out;
1351 }
1352
1353 /*
1354 * Hole or existing extent len can be arbitrary, so
1355 * cap it to our own allocation request.
1356 */
1357 if (alloc_size > clusters)
1358 alloc_size = clusters;
1359
1360 if (phys_cpos) {
1361 /*
1362 * We already have an allocation at this
1363 * region so we can safely skip it.
1364 */
1365 goto next;
1366 }
1367
1368 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1369 if (ret) {
1370 if (ret != -ENOSPC)
1371 mlog_errno(ret);
1372 goto out;
1373 }
1374
1375 next:
1376 cpos += alloc_size;
1377 clusters -= alloc_size;
1378 }
1379
1380 ret = 0;
1381 out:
1382
1383 brelse(di_bh);
1384 return ret;
1385 }
1386
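/*
 * Remove one contiguous run of len clusters at cpos from the extent
 * tree and queue the freed blocks on the truncate log.
 */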
1387 static int __ocfs2_remove_inode_range(struct inode *inode,
1388 struct buffer_head *di_bh,
1389 u32 cpos, u32 phys_cpos, u32 len,
1390 struct ocfs2_cached_dealloc_ctxt *dealloc)
1391 {
1392 int ret;
1393 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
1394 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1395 struct inode *tl_inode = osb->osb_tl_inode;
1396 handle_t *handle;
1397 struct ocfs2_alloc_context *meta_ac = NULL;
1398 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1399
1400 ret = ocfs2_lock_allocators(inode, di_bh, 0, 1, NULL, &meta_ac);
1401 if (ret) {
1402 mlog_errno(ret);
1403 return ret;
1404 }
1405
1406 mutex_lock(&tl_inode->i_mutex);
1407
1408 if (ocfs2_truncate_log_needs_flush(osb)) {
1409 ret = __ocfs2_flush_truncate_log(osb);
1410 if (ret < 0) {
1411 mlog_errno(ret);
1412 goto out;
1413 }
1414 }
1415
1416 handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS);
1417 if (handle == NULL) {
1418 ret = -ENOMEM;
1419 mlog_errno(ret);
1420 goto out;
1421 }
1422
1423 ret = ocfs2_journal_access(handle, inode, di_bh,
1424 OCFS2_JOURNAL_ACCESS_WRITE);
1425 if (ret) {
1426 mlog_errno(ret);
1427 goto out;
1428 }
1429
1430 ret = ocfs2_remove_extent(inode, di_bh, cpos, len, handle, meta_ac,
1431 dealloc);
1432 if (ret) {
1433 mlog_errno(ret);
1434 goto out_commit;
1435 }
1436
1437 OCFS2_I(inode)->ip_clusters -= len;
1438 di->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
1439
1440 ret = ocfs2_journal_dirty(handle, di_bh);
1441 if (ret) {
1442 mlog_errno(ret);
1443 goto out_commit;
1444 }
1445
1446 ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len);
1447 if (ret)
1448 mlog_errno(ret);
1449
1450 out_commit:
1451 ocfs2_commit_trans(osb, handle);
1452 out:
1453 mutex_unlock(&tl_inode->i_mutex);
1454
1455 if (meta_ac)
1456 ocfs2_free_alloc_context(meta_ac);
1457
1458 return ret;
1459 }
1460
1461 /*
1462 * Truncate a byte range, avoiding pages within partial clusters. This
1463 * preserves those pages for the zeroing code to write to.
1464 */
1465 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1466 u64 byte_len)
1467 {
1468 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1469 loff_t start, end;
1470 struct address_space *mapping = inode->i_mapping;
1471
1472 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1473 end = byte_start + byte_len;
1474 end = end & ~(osb->s_clustersize - 1);
1475
1476 if (start < end) {
1477 unmap_mapping_range(mapping, start, end - start, 0);
1478 truncate_inode_pages_range(mapping, start, end - 1);
1479 }
1480 }
1481
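/*
 * Zero the partially-used clusters at either end of the range being
 * deallocated; fully covered clusters are removed by the caller.
 */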
1482 static int ocfs2_zero_partial_clusters(struct inode *inode,
1483 u64 start, u64 len)
1484 {
1485 int ret = 0;
1486 u64 tmpend, end = start + len;
1487 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1488 unsigned int csize = osb->s_clustersize;
1489 handle_t *handle;
1490
1491 /*
1492 * The "start" and "end" values are NOT necessarily part of
1493 * the range whose allocation is being deleted. Rather, this
1494 * is what the user passed in with the request. We must zero
1495 * partial clusters here. There's no need to worry about
1496 * physical allocation - the zeroing code knows to skip holes.
1497 */
1498 mlog(0, "byte start: %llu, end: %llu\n",
1499 (unsigned long long)start, (unsigned long long)end);
1500
1501 /*
1502 * If both edges are on a cluster boundary then there's no
1503 * zeroing required as the region is part of the allocation to
1504 * be truncated.
1505 */
1506 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1507 goto out;
1508
1509 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1510 if (handle == NULL) {
1511 ret = -ENOMEM;
1512 mlog_errno(ret);
1513 goto out;
1514 }
1515
1516 /*
1517 * We want to get the byte offset of the end of the 1st cluster.
1518 */
1519 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1520 if (tmpend > end)
1521 tmpend = end;
1522
1523 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1524 (unsigned long long)start, (unsigned long long)tmpend);
1525
1526 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1527 if (ret)
1528 mlog_errno(ret);
1529
1530 if (tmpend < end) {
1531 /*
1532 * This may make start and end equal, but the zeroing
1533 * code will skip any work in that case so there's no
1534 * need to catch it up here.
1535 */
1536 start = end & ~(osb->s_clustersize - 1);
1537
1538 mlog(0, "2nd range: start: %llu, end: %llu\n",
1539 (unsigned long long)start, (unsigned long long)end);
1540
1541 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1542 if (ret)
1543 mlog_errno(ret);
1544 }
1545
1546 ocfs2_commit_trans(osb, handle);
1547 out:
1548 return ret;
1549 }
1550
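/*
 * Punch a hole in the file: zero the partial clusters at the edges,
 * then remove every allocated cluster fully covered by the byte range,
 * deferring the frees through the truncate log.
 */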
1551 static int ocfs2_remove_inode_range(struct inode *inode,
1552 struct buffer_head *di_bh, u64 byte_start,
1553 u64 byte_len)
1554 {
1555 int ret = 0;
1556 u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size;
1557 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1558 struct ocfs2_cached_dealloc_ctxt dealloc;
1559 struct address_space *mapping = inode->i_mapping;
1560
1561 ocfs2_init_dealloc_ctxt(&dealloc);
1562
1563 if (byte_len == 0)
1564 return 0;
1565
1566 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1567 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1568 byte_start + byte_len, 0);
1569 if (ret) {
1570 mlog_errno(ret);
1571 goto out;
1572 }
1573 /*
1574 * There's no need to get fancy with the page cache
1575 * truncate of an inline-data inode. We're talking
1576 * about less than a page here, which will be cached
1577 * in the dinode buffer anyway.
1578 */
1579 unmap_mapping_range(mapping, 0, 0, 0);
1580 truncate_inode_pages(mapping, 0);
1581 goto out;
1582 }
1583
1584 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1585 trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
1586 if (trunc_len >= trunc_start)
1587 trunc_len -= trunc_start;
1588 else
1589 trunc_len = 0;
1590
1591 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n",
1592 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1593 (unsigned long long)byte_start,
1594 (unsigned long long)byte_len, trunc_start, trunc_len);
1595
1596 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1597 if (ret) {
1598 mlog_errno(ret);
1599 goto out;
1600 }
1601
1602 cpos = trunc_start;
1603 while (trunc_len) {
1604 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1605 &alloc_size, NULL);
1606 if (ret) {
1607 mlog_errno(ret);
1608 goto out;
1609 }
1610
1611 if (alloc_size > trunc_len)
1612 alloc_size = trunc_len;
1613
1614 /* Only do work for non-holes */
1615 if (phys_cpos != 0) {
1616 ret = __ocfs2_remove_inode_range(inode, di_bh, cpos,
1617 phys_cpos, alloc_size,
1618 &dealloc);
1619 if (ret) {
1620 mlog_errno(ret);
1621 goto out;
1622 }
1623 }
1624
1625 cpos += alloc_size;
1626 trunc_len -= alloc_size;
1627 }
1628
1629 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1630
1631 out:
1632 ocfs2_schedule_truncate_log_flush(osb, 1);
1633 ocfs2_run_deallocs(osb, &dealloc);
1634
1635 return ret;
1636 }
1637
1638 /*
1639 * Parts of this function taken from xfs_change_file_space()
1640 */
1641 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1642 loff_t f_pos, unsigned int cmd,
1643 struct ocfs2_space_resv *sr,
1644 int change_size)
1645 {
1646 int ret;
1647 s64 llen;
1648 loff_t size;
1649 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1650 struct buffer_head *di_bh = NULL;
1651 handle_t *handle;
1652 unsigned long long max_off = inode->i_sb->s_maxbytes;
1653
1654 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1655 return -EROFS;
1656
1657 mutex_lock(&inode->i_mutex);
1658
1659 /*
1660 * This prevents concurrent writes on other nodes
1661 */
1662 ret = ocfs2_rw_lock(inode, 1);
1663 if (ret) {
1664 mlog_errno(ret);
1665 goto out;
1666 }
1667
1668 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1669 if (ret) {
1670 mlog_errno(ret);
1671 goto out_rw_unlock;
1672 }
1673
1674 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1675 ret = -EPERM;
1676 goto out_inode_unlock;
1677 }
1678
1679 switch (sr->l_whence) {
1680 case 0: /*SEEK_SET*/
1681 break;
1682 case 1: /*SEEK_CUR*/
1683 sr->l_start += f_pos;
1684 break;
1685 case 2: /*SEEK_END*/
1686 sr->l_start += i_size_read(inode);
1687 break;
1688 default:
1689 ret = -EINVAL;
1690 goto out_inode_unlock;
1691 }
1692 sr->l_whence = 0;
1693
1694 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1695
1696 if (sr->l_start < 0
1697 || sr->l_start > max_off
1698 || (sr->l_start + llen) < 0
1699 || (sr->l_start + llen) > max_off) {
1700 ret = -EINVAL;
1701 goto out_inode_unlock;
1702 }
1703 size = sr->l_start + sr->l_len;
1704
1705 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1706 if (sr->l_len <= 0) {
1707 ret = -EINVAL;
1708 goto out_inode_unlock;
1709 }
1710 }
1711
1712 if (file && should_remove_suid(file->f_path.dentry)) {
1713 ret = __ocfs2_write_remove_suid(inode, di_bh);
1714 if (ret) {
1715 mlog_errno(ret);
1716 goto out_inode_unlock;
1717 }
1718 }
1719
1720 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1721 switch (cmd) {
1722 case OCFS2_IOC_RESVSP:
1723 case OCFS2_IOC_RESVSP64:
1724 /*
1725 * This takes unsigned offsets, but the signed ones we
1726 * pass have been checked against overflow above.
1727 */
1728 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1729 sr->l_len);
1730 break;
1731 case OCFS2_IOC_UNRESVSP:
1732 case OCFS2_IOC_UNRESVSP64:
1733 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1734 sr->l_len);
1735 break;
1736 default:
1737 ret = -EINVAL;
1738 }
1739 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1740 if (ret) {
1741 mlog_errno(ret);
1742 goto out_inode_unlock;
1743 }
1744
1745 /*
1746 * We update c/mtime for these changes
1747 */
1748 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1749 if (IS_ERR(handle)) {
1750 ret = PTR_ERR(handle);
1751 mlog_errno(ret);
1752 goto out_inode_unlock;
1753 }
1754
1755 if (change_size && i_size_read(inode) < size)
1756 i_size_write(inode, size);
1757
1758 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1759 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1760 if (ret < 0)
1761 mlog_errno(ret);
1762
1763 ocfs2_commit_trans(osb, handle);
1764
1765 out_inode_unlock:
1766 brelse(di_bh);
1767 ocfs2_inode_unlock(inode, 1);
1768 out_rw_unlock:
1769 ocfs2_rw_unlock(inode, 1);
1770
1771 out:
1772 mutex_unlock(&inode->i_mutex);
1773 return ret;
1774 }
1775
1776 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1777 struct ocfs2_space_resv *sr)
1778 {
1779 struct inode *inode = file->f_path.dentry->d_inode;
1780 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1781
1782 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1783 !ocfs2_writes_unwritten_extents(osb))
1784 return -ENOTTY;
1785 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1786 !ocfs2_sparse_alloc(osb))
1787 return -ENOTTY;
1788
1789 if (!S_ISREG(inode->i_mode))
1790 return -EINVAL;
1791
1792 if (!(file->f_mode & FMODE_WRITE))
1793 return -EBADF;
1794
1795 return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
1796 }
1797
1798 static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset,
1799 loff_t len)
1800 {
1801 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1802 struct ocfs2_space_resv sr;
1803 int change_size = 1;
1804
1805 if (!ocfs2_writes_unwritten_extents(osb))
1806 return -EOPNOTSUPP;
1807
1808 if (S_ISDIR(inode->i_mode))
1809 return -ENODEV;
1810
1811 if (mode & FALLOC_FL_KEEP_SIZE)
1812 change_size = 0;
1813
1814 sr.l_whence = 0;
1815 sr.l_start = (s64)offset;
1816 sr.l_len = (s64)len;
1817
1818 return __ocfs2_change_file_space(NULL, inode, offset,
1819 OCFS2_IOC_RESVSP64, &sr, change_size);
1820 }
1821
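/*
 * Take the inode meta lock (upgrading to exclusive if suid/sgid must
 * be cleared), resolve the final write position for O_APPEND, and
 * decide whether an O_DIRECT write can proceed or must fall back to
 * buffered I/O (inline data, size-extending writes, or holes).
 */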
1822 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1823 loff_t *ppos,
1824 size_t count,
1825 int appending,
1826 int *direct_io)
1827 {
1828 int ret = 0, meta_level = 0;
1829 struct inode *inode = dentry->d_inode;
1830 loff_t saved_pos, end;
1831
1832 /*
1833 * We start with a read level meta lock and only jump to an ex
1834 * if we need to make modifications here.
1835 */
1836 for(;;) {
1837 ret = ocfs2_inode_lock(inode, NULL, meta_level);
1838 if (ret < 0) {
1839 meta_level = -1;
1840 mlog_errno(ret);
1841 goto out;
1842 }
1843
1844 /* Clear suid / sgid if necessary. We do this here
1845 * instead of later in the write path because
1846 * remove_suid() calls ->setattr without any hint that
1847 * we may have already done our cluster locking. Since
1848 * ocfs2_setattr() *must* take cluster locks to
1849 		 * proceed, this will lead us to recursively lock the
1850 * inode. There's also the dinode i_size state which
1851 * can be lost via setattr during extending writes (we
1852 		 * set inode->i_size at the end of a write). */
1853 if (should_remove_suid(dentry)) {
1854 if (meta_level == 0) {
1855 ocfs2_inode_unlock(inode, meta_level);
1856 meta_level = 1;
1857 continue;
1858 }
1859
1860 ret = ocfs2_write_remove_suid(inode);
1861 if (ret < 0) {
1862 mlog_errno(ret);
1863 goto out_unlock;
1864 }
1865 }
1866
1867 /* work on a copy of ppos until we're sure that we won't have
1868 * to recalculate it due to relocking. */
1869 if (appending) {
1870 saved_pos = i_size_read(inode);
1871 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
1872 } else {
1873 saved_pos = *ppos;
1874 }
1875
1876 end = saved_pos + count;
1877
1878 /*
1879 * Skip the O_DIRECT checks if we don't need
1880 * them.
1881 */
1882 if (!direct_io || !(*direct_io))
1883 break;
1884
1885 /*
1886 * There's no sane way to do direct writes to an inode
1887 * with inline data.
1888 */
1889 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1890 *direct_io = 0;
1891 break;
1892 }
1893
1894 /*
1895 * Allowing concurrent direct writes means
1896 * i_size changes wouldn't be synchronized, so
1897 * one node could wind up truncating another
1898 		 * node's writes.
1899 */
1900 if (end > i_size_read(inode)) {
1901 *direct_io = 0;
1902 break;
1903 }
1904
1905 /*
1906 * We don't fill holes during direct io, so
1907 * check for them here. If any are found, the
1908 * caller will have to retake some cluster
1909 * locks and initiate the io as buffered.
1910 */
1911 ret = ocfs2_check_range_for_holes(inode, saved_pos, count);
1912 if (ret == 1) {
1913 *direct_io = 0;
1914 ret = 0;
1915 } else if (ret < 0)
1916 mlog_errno(ret);
1917 break;
1918 }
1919
1920 if (appending)
1921 *ppos = saved_pos;
1922
1923 out_unlock:
1924 ocfs2_inode_unlock(inode, meta_level);
1925
1926 out:
1927 return ret;
1928 }
1929
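/*
 * Main write path: take i_mutex and the cluster locks, fall back from
 * O_DIRECT to buffered I/O when required, and force a journal commit
 * for O_SYNC writes that changed i_size or the cluster count.
 */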
1930 static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1931 const struct iovec *iov,
1932 unsigned long nr_segs,
1933 loff_t pos)
1934 {
1935 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1936 int can_do_direct;
1937 ssize_t written = 0;
1938 size_t ocount; /* original count */
1939 size_t count; /* after file limit checks */
1940 loff_t old_size, *ppos = &iocb->ki_pos;
1941 u32 old_clusters;
1942 struct file *file = iocb->ki_filp;
1943 struct inode *inode = file->f_path.dentry->d_inode;
1944 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1945
1946 mlog_entry("(0x%p, %u, '%.*s')\n", file,
1947 (unsigned int)nr_segs,
1948 file->f_path.dentry->d_name.len,
1949 file->f_path.dentry->d_name.name);
1950
1951 if (iocb->ki_left == 0)
1952 return 0;
1953
1954 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1955
1956 appending = file->f_flags & O_APPEND ? 1 : 0;
1957 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
1958
1959 mutex_lock(&inode->i_mutex);
1960
1961 relock:
1962 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
1963 if (direct_io) {
1964 down_read(&inode->i_alloc_sem);
1965 have_alloc_sem = 1;
1966 }
1967
1968 /* concurrent O_DIRECT writes are allowed */
1969 rw_level = !direct_io;
1970 ret = ocfs2_rw_lock(inode, rw_level);
1971 if (ret < 0) {
1972 mlog_errno(ret);
1973 goto out_sems;
1974 }
1975
1976 can_do_direct = direct_io;
1977 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1978 iocb->ki_left, appending,
1979 &can_do_direct);
1980 if (ret < 0) {
1981 mlog_errno(ret);
1982 goto out;
1983 }
1984
1985 /*
1986 * We can't complete the direct I/O as requested, fall back to
1987 * buffered I/O.
1988 */
1989 if (direct_io && !can_do_direct) {
1990 ocfs2_rw_unlock(inode, rw_level);
1991 up_read(&inode->i_alloc_sem);
1992
1993 have_alloc_sem = 0;
1994 rw_level = -1;
1995
1996 direct_io = 0;
1997 goto relock;
1998 }
1999
2000 /*
2001 * To later detect whether a journal commit for sync writes is
2002 * necessary, we sample i_size, and cluster count here.
2003 */
2004 old_size = i_size_read(inode);
2005 old_clusters = OCFS2_I(inode)->ip_clusters;
2006
2007 /* communicate with ocfs2_dio_end_io */
2008 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2009
2010 if (direct_io) {
2011 ret = generic_segment_checks(iov, &nr_segs, &ocount,
2012 VERIFY_READ);
2013 if (ret)
2014 goto out_dio;
2015
2016 ret = generic_write_checks(file, ppos, &count,
2017 S_ISBLK(inode->i_mode));
2018 if (ret)
2019 goto out_dio;
2020
2021 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2022 ppos, count, ocount);
2023 if (written < 0) {
2024 ret = written;
2025 goto out_dio;
2026 }
2027 } else {
2028 written = generic_file_aio_write_nolock(iocb, iov, nr_segs,
2029 *ppos);
2030 }
2031
2032 out_dio:
2033 /* buffered aio wouldn't have proper lock coverage today */
2034 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2035
2036 if ((file->f_flags & O_SYNC && !direct_io) || IS_SYNC(inode)) {
2037 /*
2038 * The generic write paths have handled getting data
2039 * to disk, but since we don't make use of the dirty
2040 * inode list, a manual journal commit is necessary
2041 * here.
2042 */
2043 if (old_size != i_size_read(inode) ||
2044 old_clusters != OCFS2_I(inode)->ip_clusters) {
2045 ret = journal_force_commit(osb->journal->j_journal);
2046 if (ret < 0)
2047 written = ret;
2048 }
2049 }
2050
2051 /*
2052 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2053 * function pointer which is called when o_direct io completes so that
2054 * it can unlock our rw lock. (it's the clustered equivalent of
2055 * i_alloc_sem; protects truncate from racing with pending ios).
2056 * Unfortunately there are error cases which call end_io and others
2057 * that don't. so we don't have to unlock the rw_lock if either an
2058 * async dio is going to do it in the future or an end_io after an
2059 * error has already done it.
2060 */
2061 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2062 rw_level = -1;
2063 have_alloc_sem = 0;
2064 }
2065
2066 out:
2067 if (rw_level != -1)
2068 ocfs2_rw_unlock(inode, rw_level);
2069
2070 out_sems:
2071 if (have_alloc_sem)
2072 up_read(&inode->i_alloc_sem);
2073
2074 mutex_unlock(&inode->i_mutex);
2075
2076 mlog_exit(ret);
2077 return written ? written : ret;
2078 }
2079
2080 static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2081 struct file *out,
2082 loff_t *ppos,
2083 size_t len,
2084 unsigned int flags)
2085 {
2086 int ret;
2087 struct inode *inode = out->f_path.dentry->d_inode;
2088
2089 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
2090 (unsigned int)len,
2091 out->f_path.dentry->d_name.len,
2092 out->f_path.dentry->d_name.name);
2093
2094 inode_double_lock(inode, pipe->inode);
2095
2096 ret = ocfs2_rw_lock(inode, 1);
2097 if (ret < 0) {
2098 mlog_errno(ret);
2099 goto out;
2100 }
2101
2102 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
2103 NULL);
2104 if (ret < 0) {
2105 mlog_errno(ret);
2106 goto out_unlock;
2107 }
2108
2109 ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
2110
2111 out_unlock:
2112 ocfs2_rw_unlock(inode, 1);
2113 out:
2114 inode_double_unlock(inode, pipe->inode);
2115
2116 mlog_exit(ret);
2117 return ret;
2118 }
2119
2120 static ssize_t ocfs2_file_splice_read(struct file *in,
2121 loff_t *ppos,
2122 struct pipe_inode_info *pipe,
2123 size_t len,
2124 unsigned int flags)
2125 {
2126 int ret = 0;
2127 struct inode *inode = in->f_path.dentry->d_inode;
2128
2129 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
2130 (unsigned int)len,
2131 in->f_path.dentry->d_name.len,
2132 in->f_path.dentry->d_name.name);
2133
2134 /*
2135 * See the comment in ocfs2_file_aio_read()
2136 */
2137 ret = ocfs2_inode_lock(inode, NULL, 0);
2138 if (ret < 0) {
2139 mlog_errno(ret);
2140 goto bail;
2141 }
2142 ocfs2_inode_unlock(inode, 0);
2143
2144 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2145
2146 bail:
2147 mlog_exit(ret);
2148 return ret;
2149 }
2150
2151 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2152 const struct iovec *iov,
2153 unsigned long nr_segs,
2154 loff_t pos)
2155 {
2156 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2157 struct file *filp = iocb->ki_filp;
2158 struct inode *inode = filp->f_path.dentry->d_inode;
2159
2160 mlog_entry("(0x%p, %u, '%.*s')\n", filp,
2161 (unsigned int)nr_segs,
2162 filp->f_path.dentry->d_name.len,
2163 filp->f_path.dentry->d_name.name);
2164
2165 if (!inode) {
2166 ret = -EINVAL;
2167 mlog_errno(ret);
2168 goto bail;
2169 }
2170
2171 /*
2172 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2173 * need locks to protect pending reads from racing with truncate.
2174 */
2175 if (filp->f_flags & O_DIRECT) {
2176 down_read(&inode->i_alloc_sem);
2177 have_alloc_sem = 1;
2178
2179 ret = ocfs2_rw_lock(inode, 0);
2180 if (ret < 0) {
2181 mlog_errno(ret);
2182 goto bail;
2183 }
2184 rw_level = 0;
2185 /* communicate with ocfs2_dio_end_io */
2186 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2187 }
2188
2189 /*
2190 * We're fine letting folks race truncates and extending
2191 * writes with read across the cluster, just like they can
2192 * locally. Hence no rw_lock during read.
2193 *
2194 * Take and drop the meta data lock to update inode fields
2195 * like i_size. This allows the checks down below
2196 * generic_file_aio_read() a chance of actually working.
2197 */
2198 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2199 if (ret < 0) {
2200 mlog_errno(ret);
2201 goto bail;
2202 }
2203 ocfs2_inode_unlock(inode, lock_level);
2204
2205 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2206 if (ret == -EINVAL)
2207 mlog(0, "generic_file_aio_read returned -EINVAL\n");
2208
2209 /* buffered aio wouldn't have proper lock coverage today */
2210 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2211
2212 /* see ocfs2_file_aio_write */
2213 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2214 rw_level = -1;
2215 have_alloc_sem = 0;
2216 }
2217
2218 bail:
2219 if (have_alloc_sem)
2220 up_read(&inode->i_alloc_sem);
2221 if (rw_level != -1)
2222 ocfs2_rw_unlock(inode, rw_level);
2223 mlog_exit(ret);
2224
2225 return ret;
2226 }
2227
2228 const struct inode_operations ocfs2_file_iops = {
2229 .setattr = ocfs2_setattr,
2230 .getattr = ocfs2_getattr,
2231 .permission = ocfs2_permission,
2232 .fallocate = ocfs2_fallocate,
2233 .fiemap = ocfs2_fiemap,
2234 };
2235
2236 const struct inode_operations ocfs2_special_file_iops = {
2237 .setattr = ocfs2_setattr,
2238 .getattr = ocfs2_getattr,
2239 .permission = ocfs2_permission,
2240 };
2241
2242 /*
2243 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2244 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2245 */
2246 const struct file_operations ocfs2_fops = {
2247 .llseek = generic_file_llseek,
2248 .read = do_sync_read,
2249 .write = do_sync_write,
2250 .mmap = ocfs2_mmap,
2251 .fsync = ocfs2_sync_file,
2252 .release = ocfs2_file_release,
2253 .open = ocfs2_file_open,
2254 .aio_read = ocfs2_file_aio_read,
2255 .aio_write = ocfs2_file_aio_write,
2256 .unlocked_ioctl = ocfs2_ioctl,
2257 #ifdef CONFIG_COMPAT
2258 .compat_ioctl = ocfs2_compat_ioctl,
2259 #endif
2260 .lock = ocfs2_lock,
2261 .flock = ocfs2_flock,
2262 .splice_read = ocfs2_file_splice_read,
2263 .splice_write = ocfs2_file_splice_write,
2264 };
2265
2266 const struct file_operations ocfs2_dops = {
2267 .llseek = generic_file_llseek,
2268 .read = generic_read_dir,
2269 .readdir = ocfs2_readdir,
2270 .fsync = ocfs2_sync_file,
2271 .release = ocfs2_dir_release,
2272 .open = ocfs2_dir_open,
2273 .unlocked_ioctl = ocfs2_ioctl,
2274 #ifdef CONFIG_COMPAT
2275 .compat_ioctl = ocfs2_compat_ioctl,
2276 #endif
2277 .lock = ocfs2_lock,
2278 .flock = ocfs2_flock,
2279 };
2280
2281 /*
2282 * POSIX-lockless variants of our file_operations.
2283 *
2284 * These will be used if the underlying cluster stack does not support
2285 * posix file locking, if the user passes the "localflocks" mount
2286 * option, or if we have a local-only fs.
2287 *
2288 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2289 * so we still want it in the case of no stack support for
2290 * plocks. Internally, it will do the right thing when asked to ignore
2291 * the cluster.
2292 */
2293 const struct file_operations ocfs2_fops_no_plocks = {
2294 .llseek = generic_file_llseek,
2295 .read = do_sync_read,
2296 .write = do_sync_write,
2297 .mmap = ocfs2_mmap,
2298 .fsync = ocfs2_sync_file,
2299 .release = ocfs2_file_release,
2300 .open = ocfs2_file_open,
2301 .aio_read = ocfs2_file_aio_read,
2302 .aio_write = ocfs2_file_aio_write,
2303 .unlocked_ioctl = ocfs2_ioctl,
2304 #ifdef CONFIG_COMPAT
2305 .compat_ioctl = ocfs2_compat_ioctl,
2306 #endif
2307 .flock = ocfs2_flock,
2308 .splice_read = ocfs2_file_splice_read,
2309 .splice_write = ocfs2_file_splice_write,
2310 };
2311
2312 const struct file_operations ocfs2_dops_no_plocks = {
2313 .llseek = generic_file_llseek,
2314 .read = generic_read_dir,
2315 .readdir = ocfs2_readdir,
2316 .fsync = ocfs2_sync_file,
2317 .release = ocfs2_dir_release,
2318 .open = ocfs2_dir_open,
2319 .unlocked_ioctl = ocfs2_ioctl,
2320 #ifdef CONFIG_COMPAT
2321 .compat_ioctl = ocfs2_compat_ioctl,
2322 #endif
2323 .flock = ocfs2_flock,
2324 };