/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_ioctl32.h"

#include <linux/dcache.h>
#include <linux/smp_lock.h>

static struct vm_operations_struct xfs_file_vm_ops;
#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif

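/*
 * Common read path.  Every read variant below funnels into this helper,
 * which forwards the request to the behaviour-layer vnode read op.
 * O_DIRECT opens are flagged with IO_ISDIRECT, and the *_invis variants
 * additionally pass IO_INVIS ("invisible" I/O, used by the handle-based
 * interfaces so the access is not treated like an ordinary read).
 */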
STATIC_INLINE ssize_t
__xfs_file_read(
	struct kiocb		*iocb,
	const struct iovec	*iov,
	unsigned long		nr_segs,
	int			ioflags,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	bhv_vnode_t		*vp = vn_from_inode(file->f_path.dentry->d_inode);

	BUG_ON(iocb->ki_pos != pos);
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	return bhv_vop_read(vp, iocb, iov, nr_segs, &iocb->ki_pos,
				ioflags, NULL);
}

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iov,
	unsigned long		nr_segs,
	loff_t			pos)
{
	return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos);
}

STATIC ssize_t
xfs_file_aio_read_invis(
	struct kiocb		*iocb,
	const struct iovec	*iov,
	unsigned long		nr_segs,
	loff_t			pos)
{
	return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}

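/*
 * Common write path, mirroring __xfs_file_read() above: the AIO entry
 * points pass IO_ISAIO, O_DIRECT opens add IO_ISDIRECT, and the *_invis
 * variants add IO_INVIS before calling down into the vnode write op.
 */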
STATIC_INLINE ssize_t
__xfs_file_write(
	struct kiocb		*iocb,
	const struct iovec	*iov,
	unsigned long		nr_segs,
	int			ioflags,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	bhv_vnode_t		*vp = vn_from_inode(inode);

	BUG_ON(iocb->ki_pos != pos);
	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	return bhv_vop_write(vp, iocb, iov, nr_segs, &iocb->ki_pos,
				ioflags, NULL);
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iov,
	unsigned long		nr_segs,
	loff_t			pos)
{
	return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos);
}

STATIC ssize_t
xfs_file_aio_write_invis(
	struct kiocb		*iocb,
	const struct iovec	*iov,
	unsigned long		nr_segs,
	loff_t			pos)
{
	return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}

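/*
 * sendfile and splice entry points.  Each simply locates the vnode for
 * the file and calls the corresponding behaviour-layer op; the *_invis
 * versions pass IO_INVIS in the ioflags argument instead of 0.
 */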
STATIC ssize_t
xfs_file_sendfile(
	struct file		*filp,
	loff_t			*pos,
	size_t			count,
	read_actor_t		actor,
	void			*target)
{
	return bhv_vop_sendfile(vn_from_inode(filp->f_path.dentry->d_inode),
				filp, pos, 0, count, actor, target, NULL);
}

STATIC ssize_t
xfs_file_sendfile_invis(
	struct file		*filp,
	loff_t			*pos,
	size_t			count,
	read_actor_t		actor,
	void			*target)
{
	return bhv_vop_sendfile(vn_from_inode(filp->f_path.dentry->d_inode),
				filp, pos, IO_INVIS, count, actor, target, NULL);
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			len,
	unsigned int		flags)
{
	return bhv_vop_splice_read(vn_from_inode(infilp->f_path.dentry->d_inode),
				   infilp, ppos, pipe, len, flags, 0, NULL);
}

STATIC ssize_t
xfs_file_splice_read_invis(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			len,
	unsigned int		flags)
{
	return bhv_vop_splice_read(vn_from_inode(infilp->f_path.dentry->d_inode),
				   infilp, ppos, pipe, len, flags, IO_INVIS,
				   NULL);
}

STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			len,
	unsigned int		flags)
{
	return bhv_vop_splice_write(vn_from_inode(outfilp->f_path.dentry->d_inode),
				    pipe, outfilp, ppos, len, flags, 0, NULL);
}

STATIC ssize_t
xfs_file_splice_write_invis(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			len,
	unsigned int		flags)
{
	return bhv_vop_splice_write(vn_from_inode(outfilp->f_path.dentry->d_inode),
				    pipe, outfilp, ppos, len, flags, IO_INVIS,
				    NULL);
}

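/*
 * Open/close handling.  xfs_file_open() rejects large files opened
 * without O_LARGEFILE, xfs_file_close() is the ->flush() handler run on
 * every close of a file descriptor, and xfs_file_release() runs when the
 * last reference to the struct file goes away.  Note the leading '-':
 * the vnode ops return positive errnos, which the VFS expects negated.
 */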
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*filp)
{
	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	return -bhv_vop_open(vn_from_inode(inode), NULL);
}

STATIC int
xfs_file_close(
	struct file	*filp,
	fl_owner_t	id)
{
	return -bhv_vop_close(vn_from_inode(filp->f_path.dentry->d_inode), 0,
				file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	bhv_vnode_t	*vp = vn_from_inode(inode);

	if (vp)
		return -bhv_vop_release(vp);
	return 0;
}

STATIC int
xfs_file_fsync(
	struct file	*filp,
	struct dentry	*dentry,
	int		datasync)
{
	bhv_vnode_t	*vp = vn_from_inode(dentry->d_inode);
	int		flags = FSYNC_WAIT;

	if (datasync)
		flags |= FSYNC_DATA;
	if (VN_TRUNC(vp))
		VUNTRUNCATE(vp);
	return -bhv_vop_fsync(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1);
}

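/*
 * DMAPI page fault handler: send the DMAPI mmap event for the managed
 * file before falling back to the generic filemap_nopage().  If event
 * delivery fails, returning NULL makes the fault fail.
 */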
#ifdef CONFIG_XFS_DMAPI
STATIC struct page *
xfs_vm_nopage(
	struct vm_area_struct	*area,
	unsigned long		address,
	int			*type)
{
	struct inode	*inode = area->vm_file->f_path.dentry->d_inode;
	bhv_vnode_t	*vp = vn_from_inode(inode);

	ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
	if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), area, 0))
		return NULL;
	return filemap_nopage(area, address, type);
}
#endif /* CONFIG_XFS_DMAPI */

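/*
 * readdir: pull directory entries through the vnode readdir op into a
 * temporary kernel buffer of xfs_dirent_t records and feed them to the
 * VFS filldir callback one at a time.  The buffer starts at one page and
 * is halved on allocation failure, down to a 1K minimum.  Offsets handed
 * back in f_pos are masked to 31 bits, with 0x7fffffff acting as the
 * end-of-directory sentinel.
 */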
STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	int		error = 0;
	bhv_vnode_t	*vp = vn_from_inode(filp->f_path.dentry->d_inode);
	uio_t		uio;
	iovec_t		iov;
	int		eof = 0;
	caddr_t		read_buf;
	int		namelen, size = 0;
	size_t		rlen = PAGE_CACHE_SIZE;
	xfs_off_t	start_offset, curr_offset;
	xfs_dirent_t	*dbp = NULL;

	/* Try fairly hard to get memory */
	do {
		if ((read_buf = kmalloc(rlen, GFP_KERNEL)))
			break;
		rlen >>= 1;
	} while (rlen >= 1024);

	if (read_buf == NULL)
		return -ENOMEM;

	uio.uio_iov = &iov;
	uio.uio_segflg = UIO_SYSSPACE;
	curr_offset = filp->f_pos;
	if (filp->f_pos != 0x7fffffff)
		uio.uio_offset = filp->f_pos;
	else
		uio.uio_offset = 0xffffffff;

	while (!eof) {
		uio.uio_resid = iov.iov_len = rlen;
		iov.iov_base = read_buf;
		uio.uio_iovcnt = 1;

		start_offset = uio.uio_offset;

		error = bhv_vop_readdir(vp, &uio, NULL, &eof);
		if ((uio.uio_offset == start_offset) || error) {
			size = 0;
			break;
		}

		size = rlen - uio.uio_resid;
		dbp = (xfs_dirent_t *)read_buf;
		while (size > 0) {
			namelen = strlen(dbp->d_name);

			if (filldir(dirent, dbp->d_name, namelen,
					(loff_t) curr_offset & 0x7fffffff,
					(ino_t) dbp->d_ino,
					DT_UNKNOWN)) {
				goto done;
			}
			size -= dbp->d_reclen;
			curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
			dbp = (xfs_dirent_t *)((char *)dbp + dbp->d_reclen);
		}
	}
done:
	if (!error) {
		if (size == 0)
			filp->f_pos = uio.uio_offset & 0x7fffffff;
		else if (dbp)
			filp->f_pos = curr_offset;
	}

	kfree(read_buf);
	return -error;
}

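/*
 * mmap: install the XFS vm_operations.  On a DMAPI-managed filesystem
 * the DMAPI-aware operations (with the event-generating nopage handler
 * above) are used instead.
 */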
STATIC int
xfs_file_mmap(
	struct file		*filp,
	struct vm_area_struct	*vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

#ifdef CONFIG_XFS_DMAPI
	if (vn_from_inode(filp->f_path.dentry->d_inode)->v_vfsp->vfs_flag & VFS_DMI)
		vma->vm_ops = &xfs_dmapi_file_vm_ops;
#endif /* CONFIG_XFS_DMAPI */

	file_accessed(filp);
	return 0;
}

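/*
 * ioctl entry points.  Both variants hand off to the behaviour-layer
 * ioctl op and mark the vnode modified; the _invis version flags the
 * request with IO_INVIS for the handle-based interfaces.
 */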
STATIC long
xfs_file_ioctl(
	struct file	*filp,
	unsigned int	cmd,
	unsigned long	p)
{
	int		error;
	struct inode	*inode = filp->f_path.dentry->d_inode;
	bhv_vnode_t	*vp = vn_from_inode(inode);

	error = bhv_vop_ioctl(vp, inode, filp, 0, cmd, (void __user *)p);
	VMODIFY(vp);

	/* NOTE:  some of the ioctl's return positive #'s as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
	return error;
}

STATIC long
xfs_file_ioctl_invis(
	struct file	*filp,
	unsigned int	cmd,
	unsigned long	p)
{
	int		error;
	struct inode	*inode = filp->f_path.dentry->d_inode;
	bhv_vnode_t	*vp = vn_from_inode(inode);

	error = bhv_vop_ioctl(vp, inode, filp, IO_INVIS, cmd, (void __user *)p);
	VMODIFY(vp);

	/* NOTE:  some of the ioctl's return positive #'s as a
	 *	  byte count indicating success, such as
	 *	  readlink_by_handle.  So we don't "sign flip"
	 *	  like most other routines.  This means true
	 *	  errors need to be returned as a negative value.
	 */
	return error;
}

#ifdef CONFIG_XFS_DMAPI
#ifdef HAVE_VMOP_MPROTECT
STATIC int
xfs_vm_mprotect(
	struct vm_area_struct	*vma,
	unsigned int		newflags)
{
	bhv_vnode_t	*vp = vn_from_inode(vma->vm_file->f_path.dentry->d_inode);
	int		error = 0;

	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
		if ((vma->vm_flags & VM_MAYSHARE) &&
		    (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
			xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);

			error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
		}
	}
	return error;
}
#endif /* HAVE_VMOP_MPROTECT */
#endif /* CONFIG_XFS_DMAPI */

#ifdef HAVE_FOP_OPEN_EXEC
/* If the user is attempting to execute a file that is offline then
 * we have to trigger a DMAPI READ event before the file is marked as busy
 * otherwise the invisible I/O will not be able to write to the file to bring
 * it back online.
 */
STATIC int
xfs_file_open_exec(
	struct inode	*inode)
{
	bhv_vnode_t	*vp = vn_from_inode(inode);

	if (unlikely(vp->v_vfsp->vfs_flag & VFS_DMI)) {
		xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
		xfs_inode_t	*ip = xfs_vtoi(vp);

		if (!ip)
			return -EINVAL;
		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ))
			return -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
					       0, 0, 0, NULL);
	}
	return 0;
}
#endif /* HAVE_FOP_OPEN_EXEC */

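/*
 * File operation tables.  xfs_file_operations is the normal set for
 * regular files; xfs_invis_file_operations is the same set wired up to
 * the *_invis variants, for "invisible" handle-based access; and
 * xfs_dir_file_operations covers directories.
 */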
const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.sendfile	= xfs_file_sendfile,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.flush		= xfs_file_close,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
#ifdef HAVE_FOP_OPEN_EXEC
	.open_exec	= xfs_file_open_exec,
#endif
};

const struct file_operations xfs_invis_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read_invis,
	.aio_write	= xfs_file_aio_write_invis,
	.sendfile	= xfs_file_sendfile_invis,
	.splice_read	= xfs_file_splice_read_invis,
	.splice_write	= xfs_file_splice_write_invis,
	.unlocked_ioctl	= xfs_file_ioctl_invis,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_invis_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.flush		= xfs_file_close,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
};


const struct file_operations xfs_dir_file_operations = {
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

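/*
 * vm_operations: plain files use the generic filemap fault handling;
 * the DMAPI variant routes page faults (and, where supported, mprotect)
 * through the event-generating handlers above.
 */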
static struct vm_operations_struct xfs_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

#ifdef CONFIG_XFS_DMAPI
static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
	.nopage		= xfs_vm_nopage,
	.populate	= filemap_populate,
#ifdef HAVE_VMOP_MPROTECT
	.mprotect	= xfs_vm_mprotect,
#endif
};
#endif /* CONFIG_XFS_DMAPI */