/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflag_gfs2flag - mapping between FS_*_FL flags and GFS2_DIF_* flags
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
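
/*
 * Example of the mapping above (illustrative): "chattr +j file" sets
 * FS_JOURNAL_DATA_FL from user space, which gfs2_set_flags() below
 * translates into GFS2_DIF_JDATA on a regular file, or into
 * GFS2_DIF_INHERIT_JDATA on a directory.
 */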

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int i, error;
	u32 gfsflags, fsflags = 0;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	gfsflags = ip->i_diskflags;
	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;

	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode->i_ctime = current_time(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags = 0;
	u32 mask;
	int i;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(filp, gfsflags, mask);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}
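
/*
 * Illustration only, not part of this file's interface: a minimal
 * user-space sketch of how the ioctls above are reached. The path is
 * an assumption and error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	unsigned int fsflags;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &fsflags);	// handled by gfs2_get_flags()
 *	fsflags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &fsflags);	// handled by gfs2_set_flags()
 */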

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_res.rs_sizehint))
		atomic_set(&ip->i_res.rs_sizehint, hint);
}
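
/*
 * Worked example of the round-up above (assuming a 4096-byte block
 * size, so sb_bsize_shift == 12): a 10000-byte write gives
 * blks = (10000 + 4095) >> 12 = 3, i.e. the hint also covers the
 * partial trailing block.
 */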

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_SIZE;
	/* Cast avoids a pgoff_t shift overflow on 32-bit for large files */
	u64 lblock = (u64)page->index << (PAGE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page_offset(page); /* page->index << PAGE_SHIFT overflows on 32-bit */
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	gfs2_size_hint(vmf->vma->vm_file, pos, PAGE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Update file times before taking page lock */
	file_update_time(vmf->vma->vm_file);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

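	/*
	 * Journal reservation for this transaction, sketched from the
	 * constants used below (an explanatory note, not authoritative):
	 * one block for the dinode, the indirect blocks that may be
	 * added, and, when any allocation happens, statfs and quota
	 * updates plus the affected resource group headers; jdata files
	 * additionally journal the data blocks themselves.
	 */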
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called with or without a glock held, depending upon how
 * it has been invoked. For regular files, however, it is always called
 * under a glock. For other file types it does not matter whether we
 * hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rsqa_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails for any reason other than -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of gfs2_write_calc_reserv(). Given a
 * number of blocks, determine how many bytes can be written.
 * @ip: The inode in question.
 * @len: On entry, an upper bound on the byte count; on return, the
 *       number of bytes that can be written (always <= the value passed in).
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks: Compute and return the number of indirect blocks needed
 * @max_blocks: The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
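
/*
 * Illustrative numbers for the loop above (an assumption, based on a
 * 4k block size where an indirect block holds roughly 500 pointers):
 * starting from max_data = 100000, the first pass sets aside
 * DIV_ROUND_UP(100000, 500) = 200 blocks as level-1 indirect metadata,
 * leaving 99800 blocks for data; 200 is below sd_diptrs, so the loop
 * stops. Each pass peels off one layer of indirect blocks until the
 * block count at the current tree level fits within the dinode's
 * direct pointers (sd_diptrs).
 */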

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks = UINT_MAX;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		file_update_time(file);
		mark_inode_dirty(inode);
	}

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = gfs2_rsqa_alloc(ip);
		if (ret)
			goto out_putw;

		ret = __gfs2_fallocate(file, mode, offset, len);

		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}
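
/*
 * Illustration only: how the two supported fallocate() modes look from
 * user space (the path is an assumption, error handling elided):
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/gfs2/file", O_RDWR);
 *
 *	// preallocate 1 MiB at offset 0 -> __gfs2_fallocate()
 *	fallocate(fd, 0, 0, 1 << 20);
 *
 *	// punch a hole, keeping i_size -> __gfs2_punch_hole()
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 4096);
 */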

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		if (fl_gh->gh_state == state)
			goto out;
		locks_lock_file_wait(file,
				     &(struct file_lock) {
					     .fl_type = F_UNLCK,
					     .fl_flags = FL_FLOCK
				     });
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
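	/*
	 * Retry loop, explained (a reading of the code, not authoritative):
	 * the TRY lock variants fail fast with GLR_TRYFAILED instead of
	 * blocking, and LM_FLAG_TRY_1CB additionally asks the conflicting
	 * holder to demote. We back off 1ms, 2ms, then 4ms between
	 * attempts before giving up and returning -EAGAIN.
	 */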
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};
