fs/gfs2/file.c (mirror_ubuntu-artful-kernel.git; commit: GFS2: Cache the most recently used resource group in the inode)

1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/ext2_fs.h>
22 #include <linux/falloc.h>
23 #include <linux/swap.h>
24 #include <linux/crc32.h>
25 #include <linux/writeback.h>
26 #include <asm/uaccess.h>
27 #include <linux/dlm.h>
28 #include <linux/dlm_plock.h>
29
30 #include "gfs2.h"
31 #include "incore.h"
32 #include "bmap.h"
33 #include "dir.h"
34 #include "glock.h"
35 #include "glops.h"
36 #include "inode.h"
37 #include "log.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "rgrp.h"
41 #include "trans.h"
42 #include "util.h"
43
44 /**
45 * gfs2_llseek - seek to a location in a file
46 * @file: the file
47 * @offset: the offset
48 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
49 *
50 * SEEK_END requires the glock for the file because it references the
51 * file's size.
52 *
53 * Returns: The new offset, or errno
54 */
55
56 static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
57 {
58 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
59 struct gfs2_holder i_gh;
60 loff_t error;
61
62 switch (origin) {
63 case SEEK_END: /* These reference inode->i_size */
64 case SEEK_DATA:
65 case SEEK_HOLE:
66 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
67 &i_gh);
68 if (!error) {
69 error = generic_file_llseek_unlocked(file, offset, origin);
70 gfs2_glock_dq_uninit(&i_gh);
71 }
72 break;
73 case SEEK_CUR:
74 case SEEK_SET:
75 error = generic_file_llseek_unlocked(file, offset, origin);
76 break;
77 default:
78 error = -EINVAL;
79 }
80
81 return error;
82 }
83
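As a side note on the comment above (an editor-added illustration, not part of the kernel file; the mount path is hypothetical), these are the user-space whence values that take the shared-glock path because they consult i_size:

#define _GNU_SOURCE            /* SEEK_HOLE/SEEK_DATA on older glibc */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/gfs2/somefile", O_RDONLY);
        if (fd < 0)
                return 1;
        off_t end  = lseek(fd, 0, SEEK_END);   /* references i_size: glocked */
        off_t hole = lseek(fd, 0, SEEK_HOLE);  /* likewise */
        off_t cur  = lseek(fd, 0, SEEK_CUR);   /* no glock needed */
        printf("size=%lld hole=%lld cur=%lld\n",
               (long long)end, (long long)hole, (long long)cur);
        close(fd);
        return 0;
}
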
84 /**
85 * gfs2_readdir - Read directory entries from a directory
86 * @file: The directory to read from
87 * @dirent: Buffer for dirents
88 * @filldir: Function used to do the copying
89 *
90 * Returns: errno
91 */
92
93 static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
94 {
95 struct inode *dir = file->f_mapping->host;
96 struct gfs2_inode *dip = GFS2_I(dir);
97 struct gfs2_holder d_gh;
98 u64 offset = file->f_pos;
99 int error;
100
101 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
102 error = gfs2_glock_nq(&d_gh);
103 if (error) {
104 gfs2_holder_uninit(&d_gh);
105 return error;
106 }
107
108 error = gfs2_dir_read(dir, &offset, dirent, filldir);
109
110 gfs2_glock_dq_uninit(&d_gh);
111
112 file->f_pos = offset;
113
114 return error;
115 }
116
117 /**
118 * fsflags_cvt
119 * @table: A table of 32 u32 flags
120 * @val: a 32 bit value to convert
121 *
122 * This function can be used to convert between fsflags values and
123 * GFS2's own flags values.
124 *
125 * Returns: the converted flags
126 */
127 static u32 fsflags_cvt(const u32 *table, u32 val)
128 {
129 u32 res = 0;
130 while(val) {
131 if (val & 1)
132 res |= *table;
133 table++;
134 val >>= 1;
135 }
136 return res;
137 }
138
139 static const u32 fsflags_to_gfs2[32] = {
140 [3] = GFS2_DIF_SYNC,
141 [4] = GFS2_DIF_IMMUTABLE,
142 [5] = GFS2_DIF_APPENDONLY,
143 [7] = GFS2_DIF_NOATIME,
144 [12] = GFS2_DIF_EXHASH,
145 [14] = GFS2_DIF_INHERIT_JDATA,
146 };
147
148 static const u32 gfs2_to_fsflags[32] = {
149 [gfs2fl_Sync] = FS_SYNC_FL,
150 [gfs2fl_Immutable] = FS_IMMUTABLE_FL,
151 [gfs2fl_AppendOnly] = FS_APPEND_FL,
152 [gfs2fl_NoAtime] = FS_NOATIME_FL,
153 [gfs2fl_ExHash] = FS_INDEX_FL,
154 [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
155 };
156
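The conversion above is purely positional: bit i of the input value selects table[i] for the output. A self-contained sketch of the same table-walk (editor-added; the flag values are made-up stand-ins, not the real FS_*_FL or GFS2_DIF_* constants):

#include <stdint.h>
#include <stdio.h>

/* Same loop as fsflags_cvt(): walk the input LSB-first, OR in table[i]
 * whenever bit i is set. */
static uint32_t cvt(const uint32_t *table, uint32_t val)
{
        uint32_t res = 0;
        while (val) {
                if (val & 1)
                        res |= *table;
                table++;
                val >>= 1;
        }
        return res;
}

int main(void)
{
        /* bit 3 maps to 0x100, bit 14 to 0x200 (illustrative values only) */
        uint32_t table[32] = { [3] = 0x100, [14] = 0x200 };
        printf("0x%x\n", cvt(table, (1u << 3) | (1u << 14)));  /* prints 0x300 */
        return 0;
}
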
157 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
158 {
159 struct inode *inode = filp->f_path.dentry->d_inode;
160 struct gfs2_inode *ip = GFS2_I(inode);
161 struct gfs2_holder gh;
162 int error;
163 u32 fsflags;
164
165 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
166 error = gfs2_glock_nq(&gh);
167 if (error)
168 return error;
169
170 fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
171 if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
172 fsflags |= FS_JOURNAL_DATA_FL;
173 if (put_user(fsflags, ptr))
174 error = -EFAULT;
175
176 gfs2_glock_dq(&gh);
177 gfs2_holder_uninit(&gh);
178 return error;
179 }
180
181 void gfs2_set_inode_flags(struct inode *inode)
182 {
183 struct gfs2_inode *ip = GFS2_I(inode);
184 unsigned int flags = inode->i_flags;
185
186 flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
187 if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
188 inode->i_flags |= S_NOSEC;
189 if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
190 flags |= S_IMMUTABLE;
191 if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
192 flags |= S_APPEND;
193 if (ip->i_diskflags & GFS2_DIF_NOATIME)
194 flags |= S_NOATIME;
195 if (ip->i_diskflags & GFS2_DIF_SYNC)
196 flags |= S_SYNC;
197 inode->i_flags = flags;
198 }
199
200 /* Flags that can be set by user space */
201 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
202 GFS2_DIF_IMMUTABLE| \
203 GFS2_DIF_APPENDONLY| \
204 GFS2_DIF_NOATIME| \
205 GFS2_DIF_SYNC| \
206 GFS2_DIF_SYSTEM| \
207 GFS2_DIF_INHERIT_JDATA)
208
209 /**
210  * do_gfs2_set_flags - set flags on an inode
211  * @filp: file pointer for the inode whose flags are to be changed
212  * @reqflags: The flags to set
213  * @mask: Indicates which flags are valid
214 *
215 */
216 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
217 {
218 struct inode *inode = filp->f_path.dentry->d_inode;
219 struct gfs2_inode *ip = GFS2_I(inode);
220 struct gfs2_sbd *sdp = GFS2_SB(inode);
221 struct buffer_head *bh;
222 struct gfs2_holder gh;
223 int error;
224 u32 new_flags, flags;
225
226 error = mnt_want_write(filp->f_path.mnt);
227 if (error)
228 return error;
229
230 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
231 if (error)
232 goto out_drop_write;
233
234 error = -EACCES;
235 if (!inode_owner_or_capable(inode))
236 goto out;
237
238 error = 0;
239 flags = ip->i_diskflags;
240 new_flags = (flags & ~mask) | (reqflags & mask);
241 if ((new_flags ^ flags) == 0)
242 goto out;
243
244 error = -EINVAL;
245 if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
246 goto out;
247
248 error = -EPERM;
249 if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
250 goto out;
251 if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
252 goto out;
253 if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
254 !capable(CAP_LINUX_IMMUTABLE))
255 goto out;
256 if (!IS_IMMUTABLE(inode)) {
257 error = gfs2_permission(inode, MAY_WRITE);
258 if (error)
259 goto out;
260 }
261 if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
262 if (flags & GFS2_DIF_JDATA)
263 gfs2_log_flush(sdp, ip->i_gl);
264 error = filemap_fdatawrite(inode->i_mapping);
265 if (error)
266 goto out;
267 error = filemap_fdatawait(inode->i_mapping);
268 if (error)
269 goto out;
270 }
271 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
272 if (error)
273 goto out;
274 error = gfs2_meta_inode_buffer(ip, &bh);
275 if (error)
276 goto out_trans_end;
277 gfs2_trans_add_bh(ip->i_gl, bh, 1);
278 ip->i_diskflags = new_flags;
279 gfs2_dinode_out(ip, bh->b_data);
280 brelse(bh);
281 gfs2_set_inode_flags(inode);
282 gfs2_set_aops(inode);
283 out_trans_end:
284 gfs2_trans_end(sdp);
285 out:
286 gfs2_glock_dq_uninit(&gh);
287 out_drop_write:
288 mnt_drop_write(filp->f_path.mnt);
289 return error;
290 }
291
292 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
293 {
294 struct inode *inode = filp->f_path.dentry->d_inode;
295 u32 fsflags, gfsflags;
296
297 if (get_user(fsflags, ptr))
298 return -EFAULT;
299
300 gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
301 if (!S_ISDIR(inode->i_mode)) {
302 if (gfsflags & GFS2_DIF_INHERIT_JDATA)
303 gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
304 return do_gfs2_set_flags(filp, gfsflags, ~0);
305 }
306 return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
307 }
308
309 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
310 {
311 switch(cmd) {
312 case FS_IOC_GETFLAGS:
313 return gfs2_get_flags(filp, (u32 __user *)arg);
314 case FS_IOC_SETFLAGS:
315 return gfs2_set_flags(filp, (u32 __user *)arg);
316 }
317 return -ENOTTY;
318 }
319
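For context, this is roughly how the two ioctls above are reached from user space (an editor-added sketch of what lsattr/chattr do; the path is hypothetical and error handling is abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>          /* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_*_FL */
#include <unistd.h>

int main(void)
{
        unsigned int flags;
        int fd = open("/mnt/gfs2/somefile", O_RDONLY);

        if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                return 1;
        flags |= FS_NOATIME_FL;                /* becomes GFS2_DIF_NOATIME */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
                perror("FS_IOC_SETFLAGS");
        close(fd);
        return 0;
}
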
320 /**
321 * gfs2_allocate_page_backing - Use bmap to allocate blocks
322 * @page: The (locked) page to allocate backing for
323 *
324 * We try to allocate all the blocks required for the page in
325 * one go. This might fail for various reasons, so we keep
326 * trying until all the blocks to back this page are allocated.
327 * If some of the blocks are already allocated, that's ok too.
328 */
329
330 static int gfs2_allocate_page_backing(struct page *page)
331 {
332 struct inode *inode = page->mapping->host;
333 struct buffer_head bh;
334 unsigned long size = PAGE_CACHE_SIZE;
335 u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
336
337 do {
338 bh.b_state = 0;
339 bh.b_size = size;
340 gfs2_block_map(inode, lblock, &bh, 1);
341 if (!buffer_mapped(&bh))
342 return -EIO;
343 size -= bh.b_size;
344 lblock += (bh.b_size >> inode->i_blkbits);
345 } while(size > 0);
346 return 0;
347 }
348
349 /**
350 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
351 * @vma: The virtual memory area
352 * @page: The page which is about to become writable
353 *
354 * When the page becomes writable, we need to ensure that we have
355 * blocks allocated on disk to back that page.
356 */
357
358 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
359 {
360 struct page *page = vmf->page;
361 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
362 struct gfs2_inode *ip = GFS2_I(inode);
363 struct gfs2_sbd *sdp = GFS2_SB(inode);
364 unsigned long last_index;
365 u64 pos = page->index << PAGE_CACHE_SHIFT;
366 unsigned int data_blocks, ind_blocks, rblocks;
367 struct gfs2_holder gh;
368 struct gfs2_alloc *al;
369 int ret;
370
371 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
372 ret = gfs2_glock_nq(&gh);
373 if (ret)
374 goto out;
375
376 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
377 set_bit(GIF_SW_PAGED, &ip->i_flags);
378
379 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
380 goto out_unlock;
381 ret = -ENOMEM;
382 al = gfs2_alloc_get(ip);
383 if (al == NULL)
384 goto out_unlock;
385
386 ret = gfs2_quota_lock_check(ip);
387 if (ret)
388 goto out_alloc_put;
389 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
390 al->al_requested = data_blocks + ind_blocks;
391 ret = gfs2_inplace_reserve(ip);
392 if (ret)
393 goto out_quota_unlock;
394
395 rblocks = RES_DINODE + ind_blocks;
396 if (gfs2_is_jdata(ip))
397 rblocks += data_blocks ? data_blocks : 1;
398 if (ind_blocks || data_blocks) {
399 rblocks += RES_STATFS + RES_QUOTA;
400 rblocks += gfs2_rg_blocks(ip);
401 }
402 ret = gfs2_trans_begin(sdp, rblocks, 0);
403 if (ret)
404 goto out_trans_fail;
405
406 lock_page(page);
407 ret = -EINVAL;
408 last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
409 if (page->index > last_index)
410 goto out_unlock_page;
411 ret = 0;
412 if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
413 goto out_unlock_page;
414 if (gfs2_is_stuffed(ip)) {
415 ret = gfs2_unstuff_dinode(ip, page);
416 if (ret)
417 goto out_unlock_page;
418 }
419 ret = gfs2_allocate_page_backing(page);
420
421 out_unlock_page:
422 unlock_page(page);
423 gfs2_trans_end(sdp);
424 out_trans_fail:
425 gfs2_inplace_release(ip);
426 out_quota_unlock:
427 gfs2_quota_unlock(ip);
428 out_alloc_put:
429 gfs2_alloc_put(ip);
430 out_unlock:
431 gfs2_glock_dq(&gh);
432 out:
433 gfs2_holder_uninit(&gh);
434 if (ret == -ENOMEM)
435 ret = VM_FAULT_OOM;
436 else if (ret)
437 ret = VM_FAULT_SIGBUS;
438 return ret;
439 }
440
441 static const struct vm_operations_struct gfs2_vm_ops = {
442 .fault = filemap_fault,
443 .page_mkwrite = gfs2_page_mkwrite,
444 };
445
446 /**
447 * gfs2_mmap - set up a memory mapping for a file
448 * @file: The file to map
449 * @vma: The VMA which describes the mapping
450 *
451 * There is no need to get a lock here unless we should be updating
452 * atime. We ignore any locking errors since the only consequence is
453 * a missed atime update (which will just be deferred until later).
454 *
455 * Returns: 0
456 */
457
458 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
459 {
460 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
461
462 if (!(file->f_flags & O_NOATIME) &&
463 !IS_NOATIME(&ip->i_inode)) {
464 struct gfs2_holder i_gh;
465 int error;
466
467 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
468 error = gfs2_glock_nq(&i_gh);
469 if (error == 0) {
470 file_accessed(file);
471 gfs2_glock_dq(&i_gh);
472 }
473 gfs2_holder_uninit(&i_gh);
474 if (error)
475 return error;
476 }
477 vma->vm_ops = &gfs2_vm_ops;
478 vma->vm_flags |= VM_CAN_NONLINEAR;
479
480 return 0;
481 }
482
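A hedged user-space sketch (editor-added; the path is hypothetical) of the access pattern that exercises gfs2_page_mkwrite(): the first store to a page of a MAP_SHARED mapping faults it writable, which is where the backing blocks get allocated:

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/gfs2/somefile", O_RDWR);
        if (fd < 0)
                return 1;
        if (ftruncate(fd, 4096) < 0)
                return 1;
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        memcpy(p, "hello", 5);   /* first write fault -> gfs2_page_mkwrite() */
        munmap(p, 4096);
        close(fd);
        return 0;
}
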
483 /**
484 * gfs2_open - open a file
485 * @inode: the inode to open
486 * @file: the struct file for this opening
487 *
488 * Returns: errno
489 */
490
491 static int gfs2_open(struct inode *inode, struct file *file)
492 {
493 struct gfs2_inode *ip = GFS2_I(inode);
494 struct gfs2_holder i_gh;
495 struct gfs2_file *fp;
496 int error;
497
498 fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
499 if (!fp)
500 return -ENOMEM;
501
502 mutex_init(&fp->f_fl_mutex);
503
504 gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
505 file->private_data = fp;
506
507 if (S_ISREG(ip->i_inode.i_mode)) {
508 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
509 &i_gh);
510 if (error)
511 goto fail;
512
513 if (!(file->f_flags & O_LARGEFILE) &&
514 i_size_read(inode) > MAX_NON_LFS) {
515 error = -EOVERFLOW;
516 goto fail_gunlock;
517 }
518
519 gfs2_glock_dq_uninit(&i_gh);
520 }
521
522 return 0;
523
524 fail_gunlock:
525 gfs2_glock_dq_uninit(&i_gh);
526 fail:
527 file->private_data = NULL;
528 kfree(fp);
529 return error;
530 }
531
532 /**
533 * gfs2_close - called to close a struct file
534 * @inode: the inode the struct file belongs to
535 * @file: the struct file being closed
536 *
537 * Returns: errno
538 */
539
540 static int gfs2_close(struct inode *inode, struct file *file)
541 {
542 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
543 struct gfs2_file *fp;
544
545 fp = file->private_data;
546 file->private_data = NULL;
547
548 if (gfs2_assert_warn(sdp, fp))
549 return -EIO;
550
551 kfree(fp);
552
553 return 0;
554 }
555
556 /**
557 * gfs2_fsync - sync the dirty data for a file (across the cluster)
558 * @file: the file that points to the dentry
559 * @start: the start position in the file to sync
560 * @end: the end position in the file to sync
561 * @datasync: set if we can ignore timestamp changes
562 *
563 * We split the data flushing here so that we don't wait for the data
564 * until after we've also sent the metadata to disk. Note that for
565 * data=ordered, we will write & wait for the data at the log flush
566 * stage anyway, so this is unlikely to make much of a difference
567 * except in the data=writeback case.
568 *
569 * If the fdatawrite fails for any reason except -EIO, we will
570 * continue the remainder of the fsync, although we'll still report
571 * the error at the end. This is to match filemap_write_and_wait_range()
572 * behaviour.
573 *
574 * Returns: errno
575 */
576
577 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
578 int datasync)
579 {
580 struct address_space *mapping = file->f_mapping;
581 struct inode *inode = mapping->host;
582 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
583 struct gfs2_inode *ip = GFS2_I(inode);
584 int ret, ret1 = 0;
585
586 if (mapping->nrpages) {
587 ret1 = filemap_fdatawrite_range(mapping, start, end);
588 if (ret1 == -EIO)
589 return ret1;
590 }
591
592 if (datasync)
593 sync_state &= ~I_DIRTY_SYNC;
594
595 if (sync_state) {
596 mutex_lock(&inode->i_mutex);
597 ret = sync_inode_metadata(inode, 1);
598 if (ret) {
599 mutex_unlock(&inode->i_mutex);
600 return ret;
601 }
602 if (gfs2_is_jdata(ip))
603 filemap_write_and_wait(mapping);
604 gfs2_ail_flush(ip->i_gl);
605 mutex_unlock(&inode->i_mutex);
606 }
607
608 if (mapping->nrpages)
609 ret = filemap_fdatawait_range(mapping, start, end);
610
611 return ret ? ret : ret1;
612 }
613
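The datasync distinction described above corresponds to the two user-space entry points (editor-added sketch; hypothetical path):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/gfs2/somefile", O_WRONLY);
        if (fd < 0)
                return 1;
        if (write(fd, "data\n", 5) != 5)
                return 1;
        fdatasync(fd);   /* datasync = 1: timestamp-only dirtiness is skipped */
        fsync(fd);       /* datasync = 0: metadata-only changes are synced too */
        close(fd);
        return 0;
}
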
614 /**
615 * gfs2_file_aio_write - Perform a write to a file
616 * @iocb: The io context
617 * @iov: The data to write
618 * @nr_segs: Number of @iov segments
619 * @pos: The file position
620 *
621 * We have to do a lock/unlock here to refresh the inode size for
622 * O_APPEND writes, otherwise we can land up writing at the wrong
623 * offset. There is still a race, but provided the app is using its
624 * own file locking, this will make O_APPEND work as expected.
625 *
626 */
627
628 static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
629 unsigned long nr_segs, loff_t pos)
630 {
631 struct file *file = iocb->ki_filp;
632
633 if (file->f_flags & O_APPEND) {
634 struct dentry *dentry = file->f_dentry;
635 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
636 struct gfs2_holder gh;
637 int ret;
638
639 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
640 if (ret)
641 return ret;
642 gfs2_glock_dq_uninit(&gh);
643 }
644
645 return generic_file_aio_write(iocb, iov, nr_segs, pos);
646 }
647
648 static int empty_write_end(struct page *page, unsigned from,
649 unsigned to, int mode)
650 {
651 struct inode *inode = page->mapping->host;
652 struct gfs2_inode *ip = GFS2_I(inode);
653 struct buffer_head *bh;
654 unsigned offset, blksize = 1 << inode->i_blkbits;
655 pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
656
657 zero_user(page, from, to-from);
658 mark_page_accessed(page);
659
660 if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
661 if (!gfs2_is_writeback(ip))
662 gfs2_page_add_databufs(ip, page, from, to);
663
664 block_commit_write(page, from, to);
665 return 0;
666 }
667
668 offset = 0;
669 bh = page_buffers(page);
670 while (offset < to) {
671 if (offset >= from) {
672 set_buffer_uptodate(bh);
673 mark_buffer_dirty(bh);
674 clear_buffer_new(bh);
675 write_dirty_buffer(bh, WRITE);
676 }
677 offset += blksize;
678 bh = bh->b_this_page;
679 }
680
681 offset = 0;
682 bh = page_buffers(page);
683 while (offset < to) {
684 if (offset >= from) {
685 wait_on_buffer(bh);
686 if (!buffer_uptodate(bh))
687 return -EIO;
688 }
689 offset += blksize;
690 bh = bh->b_this_page;
691 }
692 return 0;
693 }
694
695 static int needs_empty_write(sector_t block, struct inode *inode)
696 {
697 int error;
698 struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
699
700 bh_map.b_size = 1 << inode->i_blkbits;
701 error = gfs2_block_map(inode, block, &bh_map, 0);
702 if (unlikely(error))
703 return error;
704 return !buffer_mapped(&bh_map);
705 }
706
707 static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
708 int mode)
709 {
710 struct inode *inode = page->mapping->host;
711 unsigned start, end, next, blksize;
712 sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
713 int ret;
714
715 blksize = 1 << inode->i_blkbits;
716 next = end = 0;
717 while (next < from) {
718 next += blksize;
719 block++;
720 }
721 start = next;
722 do {
723 next += blksize;
724 ret = needs_empty_write(block, inode);
725 if (unlikely(ret < 0))
726 return ret;
727 if (ret == 0) {
728 if (end) {
729 ret = __block_write_begin(page, start, end - start,
730 gfs2_block_map);
731 if (unlikely(ret))
732 return ret;
733 ret = empty_write_end(page, start, end, mode);
734 if (unlikely(ret))
735 return ret;
736 end = 0;
737 }
738 start = next;
739 }
740 else
741 end = next;
742 block++;
743 } while (next < to);
744
745 if (end) {
746 ret = __block_write_begin(page, start, end - start, gfs2_block_map);
747 if (unlikely(ret))
748 return ret;
749 ret = empty_write_end(page, start, end, mode);
750 if (unlikely(ret))
751 return ret;
752 }
753
754 return 0;
755 }
756
757 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
758 int mode)
759 {
760 struct gfs2_inode *ip = GFS2_I(inode);
761 struct buffer_head *dibh;
762 int error;
763 u64 start = offset >> PAGE_CACHE_SHIFT;
764 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
765 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
766 pgoff_t curr;
767 struct page *page;
768 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
769 unsigned int from, to;
770
771 if (!end_offset)
772 end_offset = PAGE_CACHE_SIZE;
773
774 error = gfs2_meta_inode_buffer(ip, &dibh);
775 if (unlikely(error))
776 goto out;
777
778 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
779
780 if (gfs2_is_stuffed(ip)) {
781 error = gfs2_unstuff_dinode(ip, NULL);
782 if (unlikely(error))
783 goto out;
784 }
785
786 curr = start;
787 offset = start << PAGE_CACHE_SHIFT;
788 from = start_offset;
789 to = PAGE_CACHE_SIZE;
790 while (curr <= end) {
791 page = grab_cache_page_write_begin(inode->i_mapping, curr,
792 AOP_FLAG_NOFS);
793 if (unlikely(!page)) {
794 error = -ENOMEM;
795 goto out;
796 }
797
798 if (curr == end)
799 to = end_offset;
800 error = write_empty_blocks(page, from, to, mode);
801 if (!error && offset + to > inode->i_size &&
802 !(mode & FALLOC_FL_KEEP_SIZE)) {
803 i_size_write(inode, offset + to);
804 }
805 unlock_page(page);
806 page_cache_release(page);
807 if (error)
808 goto out;
809 curr++;
810 offset += PAGE_CACHE_SIZE;
811 from = 0;
812 }
813
814 mark_inode_dirty(inode);
815
816 brelse(dibh);
817
818 out:
819 return error;
820 }
821
822 static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
823 unsigned int *data_blocks, unsigned int *ind_blocks)
824 {
825 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
826 unsigned int max_blocks = ip->i_rgd->rd_free_clone;
827 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
828
829 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
830 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
831 max_data -= tmp;
832 }
833 /* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
834 so it might end up with fewer data blocks */
835 if (max_data <= *data_blocks)
836 return;
837 *data_blocks = max_data;
838 *ind_blocks = max_blocks - max_data;
839 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
840 if (*len > max) {
841 *len = max;
842 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
843 }
844 }
845
846 static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
847 loff_t len)
848 {
849 struct inode *inode = file->f_path.dentry->d_inode;
850 struct gfs2_sbd *sdp = GFS2_SB(inode);
851 struct gfs2_inode *ip = GFS2_I(inode);
852 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
853 loff_t bytes, max_bytes;
854 struct gfs2_alloc *al;
855 int error;
856 loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
857 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
858 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
859
860 /* We only support the FALLOC_FL_KEEP_SIZE mode */
861 if (mode & ~FALLOC_FL_KEEP_SIZE)
862 return -EOPNOTSUPP;
863
864 offset &= bsize_mask;
865
866 len = next - offset;
867 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
868 if (!bytes)
869 bytes = UINT_MAX;
870 bytes &= bsize_mask;
871 if (bytes == 0)
872 bytes = sdp->sd_sb.sb_bsize;
873
874 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
875 error = gfs2_glock_nq(&ip->i_gh);
876 if (unlikely(error))
877 goto out_uninit;
878
879 if (!gfs2_write_alloc_required(ip, offset, len))
880 goto out_unlock;
881
882 while (len > 0) {
883 if (len < bytes)
884 bytes = len;
885 al = gfs2_alloc_get(ip);
886 if (!al) {
887 error = -ENOMEM;
888 goto out_unlock;
889 }
890
891 error = gfs2_quota_lock_check(ip);
892 if (error)
893 goto out_alloc_put;
894
895 retry:
896 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
897
898 al->al_requested = data_blocks + ind_blocks;
899 error = gfs2_inplace_reserve(ip);
900 if (error) {
901 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
902 bytes >>= 1;
903 bytes &= bsize_mask;
904 if (bytes == 0)
905 bytes = sdp->sd_sb.sb_bsize;
906 goto retry;
907 }
908 goto out_qunlock;
909 }
910 max_bytes = bytes;
911 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
912 al->al_requested = data_blocks + ind_blocks;
913
914 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
915 RES_RG_HDR + gfs2_rg_blocks(ip);
916 if (gfs2_is_jdata(ip))
917 rblocks += data_blocks ? data_blocks : 1;
918
919 error = gfs2_trans_begin(sdp, rblocks,
920 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
921 if (error)
922 goto out_trans_fail;
923
924 error = fallocate_chunk(inode, offset, max_bytes, mode);
925 gfs2_trans_end(sdp);
926
927 if (error)
928 goto out_trans_fail;
929
930 len -= max_bytes;
931 offset += max_bytes;
932 gfs2_inplace_release(ip);
933 gfs2_quota_unlock(ip);
934 gfs2_alloc_put(ip);
935 }
936 goto out_unlock;
937
938 out_trans_fail:
939 gfs2_inplace_release(ip);
940 out_qunlock:
941 gfs2_quota_unlock(ip);
942 out_alloc_put:
943 gfs2_alloc_put(ip);
944 out_unlock:
945 gfs2_glock_dq(&ip->i_gh);
946 out_uninit:
947 gfs2_holder_uninit(&ip->i_gh);
948 return error;
949 }
950
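From user space, the only modes the function above accepts are 0 and FALLOC_FL_KEEP_SIZE; anything else fails with EOPNOTSUPP. An editor-added sketch (hypothetical path):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>      /* FALLOC_FL_KEEP_SIZE */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/gfs2/somefile", O_WRONLY);
        if (fd < 0)
                return 1;
        /* Preallocate 16 MiB of blocks without changing i_size. */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0)
                perror("fallocate");
        close(fd);
        return 0;
}
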
951 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
952
953 /**
954 * gfs2_setlease - acquire/release a file lease
955 * @file: the file pointer
956 * @arg: lease type
957 * @fl: file lock
958 *
959 * We don't currently have a way to enforce a lease across the whole
960 * cluster; until we do, disable leases (by just returning -EINVAL),
961 * unless the administrator has requested purely local locking.
962 *
963 * Locking: called under lock_flocks
964 *
965 * Returns: errno
966 */
967
968 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
969 {
970 return -EINVAL;
971 }
972
973 /**
974 * gfs2_lock - acquire/release a posix lock on a file
975 * @file: the file pointer
976 * @cmd: either modify or retrieve lock state, possibly wait
977 * @fl: type and range of lock
978 *
979 * Returns: errno
980 */
981
982 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
983 {
984 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
985 struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
986 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
987
988 if (!(fl->fl_flags & FL_POSIX))
989 return -ENOLCK;
990 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
991 return -ENOLCK;
992
993 if (cmd == F_CANCELLK) {
994 /* Hack: */
995 cmd = F_SETLK;
996 fl->fl_type = F_UNLCK;
997 }
998 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
999 return -EIO;
1000 if (IS_GETLK(cmd))
1001 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
1002 else if (fl->fl_type == F_UNLCK)
1003 return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
1004 else
1005 return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
1006 }
1007
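An editor-added sketch of the user-space side of a POSIX byte-range lock; with the DLM lock module loaded, a blocking F_SETLKW request ends up in dlm_posix_lock() above and is therefore honoured cluster-wide (path hypothetical):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct flock fl;
        int fd = open("/mnt/gfs2/somefile", O_RDWR);

        if (fd < 0)
                return 1;
        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;                /* exclusive lock ...       */
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 4096;                    /* ... over the first 4 KiB */
        if (fcntl(fd, F_SETLKW, &fl) < 0)   /* wait until granted */
                return 1;
        fl.l_type = F_UNLCK;
        fcntl(fd, F_SETLK, &fl);
        close(fd);
        return 0;
}
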
1008 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
1009 {
1010 struct gfs2_file *fp = file->private_data;
1011 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1012 struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
1013 struct gfs2_glock *gl;
1014 unsigned int state;
1015 int flags;
1016 int error = 0;
1017
1018 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
1019 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
1020
1021 mutex_lock(&fp->f_fl_mutex);
1022
1023 gl = fl_gh->gh_gl;
1024 if (gl) {
1025 if (fl_gh->gh_state == state)
1026 goto out;
1027 flock_lock_file_wait(file,
1028 &(struct file_lock){.fl_type = F_UNLCK});
1029 gfs2_glock_dq_wait(fl_gh);
1030 gfs2_holder_reinit(state, flags, fl_gh);
1031 } else {
1032 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1033 &gfs2_flock_glops, CREATE, &gl);
1034 if (error)
1035 goto out;
1036 gfs2_holder_init(gl, state, flags, fl_gh);
1037 gfs2_glock_put(gl);
1038 }
1039 error = gfs2_glock_nq(fl_gh);
1040 if (error) {
1041 gfs2_holder_uninit(fl_gh);
1042 if (error == GLR_TRYFAILED)
1043 error = -EAGAIN;
1044 } else {
1045 error = flock_lock_file_wait(file, fl);
1046 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1047 }
1048
1049 out:
1050 mutex_unlock(&fp->f_fl_mutex);
1051 return error;
1052 }
1053
1054 static void do_unflock(struct file *file, struct file_lock *fl)
1055 {
1056 struct gfs2_file *fp = file->private_data;
1057 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1058
1059 mutex_lock(&fp->f_fl_mutex);
1060 flock_lock_file_wait(file, fl);
1061 if (fl_gh->gh_gl) {
1062 gfs2_glock_dq_wait(fl_gh);
1063 gfs2_holder_uninit(fl_gh);
1064 }
1065 mutex_unlock(&fp->f_fl_mutex);
1066 }
1067
1068 /**
1069 * gfs2_flock - acquire/release a flock lock on a file
1070 * @file: the file pointer
1071 * @cmd: either modify or retrieve lock state, possibly wait
1072 * @fl: type and range of lock
1073 *
1074 * Returns: errno
1075 */
1076
1077 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1078 {
1079 if (!(fl->fl_flags & FL_FLOCK))
1080 return -ENOLCK;
1081 if (fl->fl_type & LOCK_MAND)
1082 return -EOPNOTSUPP;
1083
1084 if (fl->fl_type == F_UNLCK) {
1085 do_unflock(file, fl);
1086 return 0;
1087 } else {
1088 return do_flock(file, cmd, fl);
1089 }
1090 }
1091
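And the matching flock() usage (editor-added sketch): LOCK_EX maps to an LM_ST_EXCLUSIVE glock in do_flock() above, LOCK_SH to LM_ST_SHARED, and LOCK_UN goes through do_unflock():

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/gfs2/somefile", O_RDWR);   /* hypothetical path */
        if (fd < 0)
                return 1;
        if (flock(fd, LOCK_EX) == 0) {   /* blocks until the glock is granted */
                /* ... exclusive critical section ... */
                flock(fd, LOCK_UN);
        }
        close(fd);
        return 0;
}
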
1092 const struct file_operations gfs2_file_fops = {
1093 .llseek = gfs2_llseek,
1094 .read = do_sync_read,
1095 .aio_read = generic_file_aio_read,
1096 .write = do_sync_write,
1097 .aio_write = gfs2_file_aio_write,
1098 .unlocked_ioctl = gfs2_ioctl,
1099 .mmap = gfs2_mmap,
1100 .open = gfs2_open,
1101 .release = gfs2_close,
1102 .fsync = gfs2_fsync,
1103 .lock = gfs2_lock,
1104 .flock = gfs2_flock,
1105 .splice_read = generic_file_splice_read,
1106 .splice_write = generic_file_splice_write,
1107 .setlease = gfs2_setlease,
1108 .fallocate = gfs2_fallocate,
1109 };
1110
1111 const struct file_operations gfs2_dir_fops = {
1112 .readdir = gfs2_readdir,
1113 .unlocked_ioctl = gfs2_ioctl,
1114 .open = gfs2_open,
1115 .release = gfs2_close,
1116 .fsync = gfs2_fsync,
1117 .lock = gfs2_lock,
1118 .flock = gfs2_flock,
1119 .llseek = default_llseek,
1120 };
1121
1122 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1123
1124 const struct file_operations gfs2_file_fops_nolock = {
1125 .llseek = gfs2_llseek,
1126 .read = do_sync_read,
1127 .aio_read = generic_file_aio_read,
1128 .write = do_sync_write,
1129 .aio_write = gfs2_file_aio_write,
1130 .unlocked_ioctl = gfs2_ioctl,
1131 .mmap = gfs2_mmap,
1132 .open = gfs2_open,
1133 .release = gfs2_close,
1134 .fsync = gfs2_fsync,
1135 .splice_read = generic_file_splice_read,
1136 .splice_write = generic_file_splice_write,
1137 .setlease = generic_setlease,
1138 .fallocate = gfs2_fallocate,
1139 };
1140
1141 const struct file_operations gfs2_dir_fops_nolock = {
1142 .readdir = gfs2_readdir,
1143 .unlocked_ioctl = gfs2_ioctl,
1144 .open = gfs2_open,
1145 .release = gfs2_close,
1146 .fsync = gfs2_fsync,
1147 .llseek = default_llseek,
1148 };
1149