fs/gfs2/ops_file.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/gfs2_ioctl.h>
#include <linux/fs.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "log.h"
#include "meta_io.h"
#include "ops_file.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"

/* "bad" is for NFS support */
struct filldir_bad_entry {
	char *fbe_name;
	unsigned int fbe_length;
	uint64_t fbe_offset;
	struct gfs2_inum fbe_inum;
	unsigned int fbe_type;
};

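/*
 * Scratch buffer used by readdir_bad(): a single allocation holding an
 * array of filldir_bad_entry structs followed by a name area. Entries
 * are collected here while the directory glock is held and replayed to
 * the caller's filldir afterwards (see readdir_bad() below).
 */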
struct filldir_bad {
	struct gfs2_sbd *fdb_sbd;

	struct filldir_bad_entry *fdb_entry;
	unsigned int fdb_entry_num;
	unsigned int fdb_entry_off;

	char *fdb_name;
	unsigned int fdb_name_size;
	unsigned int fdb_name_off;
};

/* For regular, non-NFS */
struct filldir_reg {
	struct gfs2_sbd *fdr_sbd;
	int fdr_prefetch;

	filldir_t fdr_filldir;
	void *fdr_opaque;
};

/*
 * Most fields left uninitialised to catch anybody who tries to
 * use them. f_flags set to prevent file_accessed() from touching
 * any other part of this. Its use is purely as a flag so that we
 * know (in readpage()) whether or not to do locking.
 */
struct file gfs2_internal_file_sentinal = {
	.f_flags = O_NOATIME|O_RDONLY,
};

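/*
 * Read actor for gfs2_internal_read() below. Unlike file_read_actor(),
 * the destination is a kernel buffer, so a plain memcpy() from the
 * mapped page is sufficient (no copy_to_user()).
 */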
static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
			   unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long count = desc->count;

	if (size > count)
		size = count;

	kaddr = kmap(page);
	memcpy(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

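/*
 * Read file data into a kernel buffer via the page cache. The sentinel
 * file declared above is passed to do_generic_mapping_read() so that the
 * readpage code can tell this is an internal read and adjust its locking
 * accordingly.
 */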
int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct inode *inode = ip->i_vnode;
	read_descriptor_t desc;
	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = size;
	desc.error = 0;
	do_generic_mapping_read(inode->i_mapping, ra_state,
				&gfs2_internal_file_sentinal, pos, &desc,
				gfs2_read_actor);
	return desc.written ? desc.written : desc.error;
}

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	if (origin == 2) { /* SEEK_END */
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = remote_llseek(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = remote_llseek(file, offset, origin);

	return error;
}

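/*
 * Helper for the O_DIRECT read path: flush and wait on any dirty pages
 * for this mapping, then hand the request to the address space's
 * ->direct_IO() method.
 */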
static ssize_t gfs2_direct_IO_read(struct kiocb *iocb, const struct iovec *iov,
				   loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t retval;

	retval = filemap_write_and_wait(mapping);
	if (retval == 0) {
		retval = mapping->a_ops->direct_IO(READ, iocb, iov, offset,
						   nr_segs);
	}
	return retval;
}

/**
 * __gfs2_file_aio_read - The main GFS2 read function
 *
 * N.B. This is almost, but not quite the same as __generic_file_aio_read(),
 * the important subtle difference being that inode->i_size isn't valid
 * unless we are holding a lock, and we do this _only_ on the O_DIRECT
 * path since otherwise locking is done entirely at the page cache
 * layer.
 */
static ssize_t __gfs2_file_aio_read(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	struct gfs2_inode *ip = get_v2ip(filp->f_mapping->host);
	struct gfs2_holder gh;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

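	/*
	 * For O_DIRECT a shared glock is taken here (with atime update)
	 * so that i_size_read() below returns a valid size. If the inode
	 * is "stuffed" (data held inline in the dinode), direct I/O
	 * cannot be used, so the glock is dropped and we fall back to
	 * the buffered path.
	 */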
	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
		retval = gfs2_glock_nq_m_atime(1, &gh);
		if (retval)
			goto out;
		if (gfs2_is_stuffed(ip)) {
			gfs2_glock_dq_m(1, &gh);
			gfs2_holder_uninit(&gh);
			goto fallback_to_normal;
		}
		size = i_size_read(inode);
		if (pos < size) {
			retval = gfs2_direct_IO_read(iocb, iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
		goto out;
	}

fallback_to_normal:
	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp, ppos, &desc, file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
		}
	}
out:
	return retval;
}

/**
 * gfs2_read - Read bytes from a file
 * @file: The file to read from
 * @buf: The buffer to copy into
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes read
 *
 * Returns: The number of bytes read, errno on failure
 */

static ssize_t gfs2_read(struct file *filp, char __user *buf, size_t size,
			 loff_t *offset)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = size };
	struct kiocb kiocb;
	ssize_t ret;

	atomic_inc(&get_v2sdp(filp->f_mapping->host->i_sb)->sd_ops_file);

	init_sync_kiocb(&kiocb, filp);
	ret = __gfs2_file_aio_read(&kiocb, &local_iov, 1, offset);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

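/*
 * readv(2) entry point: drive the common __gfs2_file_aio_read() with a
 * synchronous kiocb and wait if the read gets queued.
 */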
static ssize_t gfs2_file_readv(struct file *filp, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	atomic_inc(&get_v2sdp(filp->f_mapping->host->i_sb)->sd_ops_file);

	init_sync_kiocb(&kiocb, filp);
	ret = __gfs2_file_aio_read(&kiocb, iov, nr_segs, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

static ssize_t gfs2_file_aio_read(struct kiocb *iocb, char __user *buf,
				  size_t count, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	atomic_inc(&get_v2sdp(filp->f_mapping->host->i_sb)->sd_ops_file);

	BUG_ON(iocb->ki_pos != pos);
	return __gfs2_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

/**
 * filldir_reg_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_reg_func(void *opaque, const char *name, unsigned int length,
			    uint64_t offset, struct gfs2_inum *inum,
			    unsigned int type)
{
	struct filldir_reg *fdr = (struct filldir_reg *)opaque;
	struct gfs2_sbd *sdp = fdr->fdr_sbd;
	int error;

	error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
				 inum->no_formal_ino, type);
	if (error)
		return 1;

	if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_inode_glops,
					LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_iopen_glops,
					LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}

/**
 * readdir_reg - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int readdir_reg(struct file *file, void *dirent, filldir_t filldir)
{
	struct gfs2_inode *dip = get_v2ip(file->f_mapping->host);
	struct filldir_reg fdr;
	struct gfs2_holder d_gh;
	uint64_t offset = file->f_pos;
	int error;

	fdr.fdr_sbd = dip->i_sbd;
	fdr.fdr_prefetch = 1;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dip, &offset, &fdr, filldir_reg_func);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * filldir_bad_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * For supporting NFS.
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_bad_func(void *opaque, const char *name, unsigned int length,
			    uint64_t offset, struct gfs2_inum *inum,
			    unsigned int type)
{
	struct filldir_bad *fdb = (struct filldir_bad *)opaque;
	struct gfs2_sbd *sdp = fdb->fdb_sbd;
	struct filldir_bad_entry *fbe;

	if (fdb->fdb_entry_off == fdb->fdb_entry_num ||
	    fdb->fdb_name_off + length > fdb->fdb_name_size)
		return 1;

	fbe = &fdb->fdb_entry[fdb->fdb_entry_off];
	fbe->fbe_name = fdb->fdb_name + fdb->fdb_name_off;
	memcpy(fbe->fbe_name, name, length);
	fbe->fbe_length = length;
	fbe->fbe_offset = offset;
	fbe->fbe_inum = *inum;
	fbe->fbe_type = type;

	fdb->fdb_entry_off++;
	fdb->fdb_name_off += length;

	if (!(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_inode_glops,
					LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp,
					inum->no_addr, &gfs2_iopen_glops,
					LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}

/**
 * readdir_bad - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * For supporting NFS: entries are first gathered into a filldir_bad
 * buffer while the directory glock is held, then handed to the caller's
 * filldir only after the glock has been dropped.
 *
 * Returns: errno
 */

static int readdir_bad(struct file *file, void *dirent, filldir_t filldir)
{
	struct gfs2_inode *dip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct filldir_reg fdr;
	unsigned int entries, size;
	struct filldir_bad *fdb;
	struct gfs2_holder d_gh;
	uint64_t offset = file->f_pos;
	unsigned int x;
	struct filldir_bad_entry *fbe;
	int error;

	entries = gfs2_tune_get(sdp, gt_entries_per_readdir);
	size = sizeof(struct filldir_bad) +
	       entries * (sizeof(struct filldir_bad_entry) + GFS2_FAST_NAME_SIZE);

	fdb = kzalloc(size, GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->fdb_sbd = sdp;
	fdb->fdb_entry = (struct filldir_bad_entry *)(fdb + 1);
	fdb->fdb_entry_num = entries;
	fdb->fdb_name = ((char *)fdb) + sizeof(struct filldir_bad) +
			entries * sizeof(struct filldir_bad_entry);
	fdb->fdb_name_size = entries * GFS2_FAST_NAME_SIZE;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		goto out;
	}

	error = gfs2_dir_read(dip, &offset, fdb, filldir_bad_func);

	gfs2_glock_dq_uninit(&d_gh);

	fdr.fdr_sbd = sdp;
	fdr.fdr_prefetch = 0;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	for (x = 0; x < fdb->fdb_entry_off; x++) {
		fbe = &fdb->fdb_entry[x];

		error = filldir_reg_func(&fdr,
					 fbe->fbe_name, fbe->fbe_length,
					 fbe->fbe_offset,
					 &fbe->fbe_inum, fbe->fbe_type);
		if (error) {
			file->f_pos = fbe->fbe_offset;
			error = 0;
			goto out;
		}
	}

	file->f_pos = offset;

 out:
	kfree(fdb);

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	int error;

	atomic_inc(&get_v2sdp(file->f_mapping->host->i_sb)->sd_ops_file);

	if (strcmp(current->comm, "nfsd") != 0)
		error = readdir_reg(file, dirent, filldir);
	else
		error = readdir_bad(file, dirent, filldir);

	return error;
}

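/*
 * Get or set the GFS2-specific dinode flags. GETFLAGS only needs a
 * shared glock; SETFLAGS takes an exclusive one and writes the updated
 * dinode back inside a transaction.
 */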
static int gfs2_ioctl_flags(struct gfs2_inode *ip, unsigned int cmd, unsigned long arg)
{
	unsigned int lmode = (cmd == GFS2_IOCTL_SETFLAGS) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	struct buffer_head *dibh;
	struct gfs2_holder i_gh;
	int error;
	__u32 flags = 0, change;

	if (cmd == GFS2_IOCTL_SETFLAGS) {
		error = get_user(flags, (__u32 __user *)arg);
		if (error)
			return -EFAULT;
	}

	error = gfs2_glock_nq_init(ip->i_gl, lmode, 0, &i_gh);
	if (error)
		return error;

	if (cmd == GFS2_IOCTL_SETFLAGS) {
		change = flags ^ ip->i_di.di_flags;
		error = -EPERM;
		if (change & (GFS2_DIF_IMMUTABLE|GFS2_DIF_APPENDONLY)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto out;
		}
		error = -EINVAL;
		if (flags & (GFS2_DIF_JDATA|GFS2_DIF_DIRECTIO)) {
			if (!S_ISREG(ip->i_di.di_mode))
				goto out;
		}
		if (flags & (GFS2_DIF_INHERIT_JDATA|GFS2_DIF_INHERIT_DIRECTIO)) {
			if (!S_ISDIR(ip->i_di.di_mode))
				goto out;
		}

		error = gfs2_trans_begin(ip->i_sbd, RES_DINODE, 0);
		if (error)
			goto out;

		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out_trans_end;

		ip->i_di.di_flags = flags;

		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);

		brelse(dibh);

out_trans_end:
		gfs2_trans_end(ip->i_sbd);
	} else {
		flags = ip->i_di.di_flags;
	}
out:
	gfs2_glock_dq_uninit(&i_gh);
	if (cmd == GFS2_IOCTL_GETFLAGS) {
		if (put_user(flags, (__u32 __user *)arg))
			return -EFAULT;
	}
	return error;
}

/**
 * gfs2_ioctl - do an ioctl on a file
 * @inode: the inode
 * @file: the file pointer
 * @cmd: the ioctl command
 * @arg: the argument
 *
 * Returns: errno
 */

static int gfs2_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct gfs2_inode *ip = get_v2ip(inode);

	atomic_inc(&ip->i_sbd->sd_ops_file);

	switch (cmd) {
	case GFS2_IOCTL_SETFLAGS:
	case GFS2_IOCTL_GETFLAGS:
		return gfs2_ioctl_flags(ip, cmd, arg);

	default:
		return -ENOTTY;
	}
}

/**
 * gfs2_mmap - set up a memory mapping for a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * Returns: 0 or error code
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	/* This is VM_MAYWRITE instead of VM_WRITE because a call
	   to mprotect() can turn on VM_WRITE later. */

	if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
	    (VM_MAYSHARE | VM_MAYWRITE))
		vma->vm_ops = &gfs2_vm_ops_sharewrite;
	else
		vma->vm_ops = &gfs2_vm_ops_private;

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	init_MUTEX(&fp->f_fl_mutex);

	fp->f_inode = ip;
	fp->f_vfile = file;

	gfs2_assert_warn(ip->i_sbd, !get_v2fp(file));
	set_v2fp(file, fp);

	if (S_ISREG(ip->i_di.di_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    ip->i_di.di_size > MAX_NON_LFS) {
			error = -EFBIG;
			goto fail_gunlock;
		}

		/* Listen to the Direct I/O flag */

		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
			file->f_flags |= O_DIRECT;

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

 fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);

 fail:
	set_v2fp(file, NULL);
	kfree(fp);

	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = get_v2sdp(inode->i_sb);
	struct gfs2_file *fp;

	atomic_inc(&sdp->sd_ops_file);

	fp = get_v2fp(file);
	set_v2fp(file, NULL);

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @dentry: the dentry that points to the inode to sync
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct gfs2_inode *ip = get_v2ip(dentry->d_inode);

	atomic_inc(&ip->i_sbd->sd_ops_file);
	gfs2_log_flush_glock(ip->i_gl);

	return 0;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct lm_lockname name =
		{ .ln_number = ip->i_num.no_addr,
		  .ln_type = LM_TYPE_PLOCK };

	atomic_inc(&sdp->sd_ops_file);

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks) {
		if (IS_GETLK(cmd)) {
			struct file_lock *tmp;
			lock_kernel();
			tmp = posix_test_lock(file, fl);
			fl->fl_type = F_UNLCK;
			if (tmp)
				memcpy(fl, tmp, sizeof(struct file_lock));
			unlock_kernel();
			return 0;
		} else {
			int error;
			lock_kernel();
			error = posix_lock_file_wait(file, fl);
			unlock_kernel();
			return error;
		}
	}

	if (IS_GETLK(cmd))
		return gfs2_lm_plock_get(sdp, &name, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return gfs2_lm_punlock(sdp, &name, file, fl);
	else
		return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}

/**
 * gfs2_sendfile - Send bytes to a file or socket
 * @in_file: The file to read from
 * @offset: The beginning file offset
 * @count: The amount of data
 * @actor: The actor used to do the copying
 * @target: Opaque target for @actor (the file or socket being written to)
 *
 * Outputs: offset - updated according to number of bytes read
 *
 * Returns: The number of bytes sent, errno on failure
 */

static ssize_t gfs2_sendfile(struct file *in_file, loff_t *offset, size_t count,
			     read_actor_t actor, void *target)
{
	struct gfs2_inode *ip = get_v2ip(in_file->f_mapping->host);

	atomic_inc(&ip->i_sbd->sd_ops_file);

	return generic_file_sendfile(in_file, offset, count, actor, target);
}

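/*
 * Implement flock() by acquiring a glock of the appropriate mode in the
 * inode's flock namespace. If a lock is already held in a different
 * mode, the old VFS lock and glock are dropped before requesting the
 * new state; non-blocking requests use LM_FLAG_TRY and map a failed try
 * to -EAGAIN.
 */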
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = get_v2fp(file);
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = fp->f_inode;
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	down(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		gfs2_glock_hold(gl);
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_uninit(fl_gh);
	} else {
		error = gfs2_glock_get(ip->i_sbd,
				       ip->i_num.no_addr, &gfs2_flock_glops,
				       CREATE, &gl);
		if (error)
			goto out;
	}

	gfs2_holder_init(gl, state, flags, fl_gh);
	gfs2_glock_put(gl);

	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(ip->i_sbd, !error);
	}

 out:
	up(&fp->f_fl_mutex);

	return error;
}

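/* Release an flock: drop the VFS-level lock, then the glock backing it. */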
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = get_v2fp(file);
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	down(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl)
		gfs2_glock_dq_uninit(fl_gh);
	up(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = get_v2ip(file->f_mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;

	atomic_inc(&ip->i_sbd->sd_ops_file);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks)
		return flock_lock_file_wait(file, fl);

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else
		return do_flock(file, cmd, fl);
}

struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = gfs2_read,
	.readv = gfs2_file_readv,
	.aio_read = gfs2_file_aio_read,
	.write = generic_file_write,
	.writev = generic_file_writev,
	.aio_write = generic_file_aio_write,
	.ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.sendfile = gfs2_sendfile,
	.flock = gfs2_flock,
};

struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
};