/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */

#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zpl.h>

static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;

	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_iterate(struct file *filp, struct dir_context *ctx)
{
	struct dentry *dentry = filp->f_path.dentry;
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	error = -zfs_readdir(dentry->d_inode, ctx, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#if !defined(HAVE_VFS_ITERATE)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dir_context ctx = DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* HAVE_VFS_ITERATE */

#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
 * Linux 2.6.x - 2.6.34 API,
 * Through 2.6.34 the nfsd kernel server would pass a NULL 'struct file *'
 * to the fops->fsync() hook.  For this reason, we must be careful not to
 * use filp unconditionally.
 */
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	error = -zfs_fsync(dentry->d_inode, datasync, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#elif defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	error = -zfs_fsync(inode, datasync, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 - 3.x API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook.  Additionally, the i_mutex
 * lock is no longer held by the caller.  ZFS does not require the lock to
 * be held, so it is not acquired here.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return (error);

	crhold(cr);
	error = -zfs_fsync(inode, datasync, cr);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
#else
#error "Unsupported fops->fsync() implementation"
#endif
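
/*
 * The 'datasync' argument shared by all three variants carries the
 * fsync(2) vs. fdatasync(2) distinction: when non-zero, only the data
 * (plus the metadata required to retrieve it) must reach stable storage.
 * The sketch below is a minimal userspace illustration of both entry
 * points; it is not part of this module.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("testfile", O_CREAT | O_WRONLY, 0644);

	if (fd < 0)
		return (1);

	(void) write(fd, "hello\n", 6);
	if (fdatasync(fd) != 0)		/* data only */
		perror("fdatasync");
	if (fsync(fd) != 0)		/* data and metadata */
		perror("fsync");

	(void) close(fd);
	return (0);
}
#endif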

ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t pos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	int error;
	ssize_t read;
	struct iovec iov;
	uio_t uio;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	uio.uio_iov = &iov;
	uio.uio_resid = len;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = pos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	error = -zfs_read(ip, &uio, flags, cr);
	if (error < 0)
		return (error);

	read = len - uio.uio_resid;
	task_io_account_read(read);

	return (read);
}
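
/*
 * Because zpl_read_common() takes an inode rather than a struct file,
 * in-kernel consumers can read file data without an open file handle.
 * A hypothetical caller reading the first 512 bytes into a kernel
 * buffer might look like the fragment below (illustrative sketch only):
 */
#if 0
	/* 'ip' is a znode-backed inode obtained elsewhere (sketch only) */
	char kbuf[512];
	cred_t *cr = CRED();
	ssize_t nread;

	crhold(cr);
	nread = zpl_read_common(ip, kbuf, sizeof (kbuf), 0,
	    UIO_SYSSPACE, 0, cr);
	crfree(cr);
#endif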

static ssize_t
zpl_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cr = CRED();
	ssize_t read;

	crhold(cr);
	read = zpl_read_common(filp->f_mapping->host, buf, len, *ppos,
	    UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	if (read < 0)
		return (read);

	*ppos += read;
	return (read);
}

ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t pos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	int error;
	ssize_t wrote;
	struct iovec iov;
	uio_t uio;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	uio.uio_iov = &iov;
	uio.uio_resid = len;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = pos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	error = -zfs_write(ip, &uio, flags, cr);
	if (error < 0)
		return (error);

	wrote = len - uio.uio_resid;
	task_io_account_write(wrote);

	return (wrote);
}

static ssize_t
zpl_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cr = CRED();
	ssize_t wrote;

	crhold(cr);
	wrote = zpl_write_common(filp->f_mapping->host, buf, len, *ppos,
	    UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	if (wrote < 0)
		return (wrote);

	*ppos += wrote;
	return (wrote);
}

static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock(ip);
		error = -zfs_holey(ip, whence, &offset);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}
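
/*
 * A minimal userspace sketch of how the SEEK_HOLE/SEEK_DATA support
 * above is consumed; it walks the data regions of a possibly sparse
 * file.  Illustrative only; it assumes a kernel and libc new enough to
 * expose these whence values.
 */
#if 0
#define	_GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	int fd = open(argv[1], O_RDONLY);
	off_t end, data;

	if (fd < 0)
		return (1);

	end = lseek(fd, 0, SEEK_END);
	data = lseek(fd, 0, SEEK_DATA);
	while (data >= 0 && data < end) {
		off_t hole = lseek(fd, data, SEEK_HOLE);

		printf("data: [%lld, %lld)\n", (long long)data,
		    (long long)hole);
		data = lseek(fd, hole, SEEK_DATA);
	}

	(void) close(fd);
	return (0);
}
#endif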

/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache.  To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2) data will be read first from the page
 * cache then the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of a mmap'ed file
 * always checks the page cache first, correct data will be returned
 * even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks.  The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files.  It
 * also adds additional complexity to the code to keep both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to a scatter-gather list
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;

	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = 1;
	mutex_exit(&zp->z_lock);

	return (error);
}
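
/*
 * The coherence described above can be observed from userspace: data
 * written with write(2) is immediately visible through an existing
 * mapping, and stores through the mapping reach disk via .writepage()
 * on msync(2).  A minimal sketch (illustrative only, not part of this
 * module):
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("maptest", O_CREAT | O_RDWR, 0644);
	char *addr;

	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return (1);

	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return (1);

	(void) pwrite(fd, "via write(2)", 12, 0);	/* updates ARC + page */
	printf("%.12s\n", addr);			/* sees the new data */

	memcpy(addr, "via mmap(2)!", 12);		/* dirties the page */
	(void) msync(addr, 4096, MS_SYNC);		/* -> .writepage() */

	(void) munmap(addr, 4096);
	(void) close(fd);
	return (0);
}
#endif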

/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 * The page itself is read in via zfs_getpage().
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	error = -zfs_getpage(ip, pl, 1);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}

/*
 * Populate a set of pages with data for the Linux page cache.  This
 * function will only be called for read ahead and never for demand
 * paging.  For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages,
	    (filler_t *)zpl_readpage, filp));
}

int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));
	ASSERT(!(current->flags & PF_NOFS));

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	current->flags |= PF_NOFS;
	(void) zfs_putpage(mapping->host, pp, wbc);
	current->flags &= ~PF_NOFS;

	return (0);
}

static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfs_sb_t *zsb = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zsb);
	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zsb);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance.  Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details).  That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}

/*
 * Write out dirty pages to the ARC.  This function is only required to
 * support mmap(2).  Mapped pages may be dirtied by memory operations
 * which never call .write().  These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	return (zpl_putpage(pp, wbc, pp->mapping));
}

/*
 * The only flag combination which matches the behavior of zfs_space()
 * is FALLOC_FL_PUNCH_HOLE.  This flag was introduced in the 2.6.38 kernel.
 */
long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	cred_t *cr = CRED();
	int error = -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE)
		return (-EOPNOTSUPP);

	crhold(cr);

#ifdef FALLOC_FL_PUNCH_HOLE
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		flock64_t bf;

		bf.l_type = F_WRLCK;
		bf.l_whence = 0;
		bf.l_start = offset;
		bf.l_len = len;
		bf.l_pid = 0;

		error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	}
#endif /* FALLOC_FL_PUNCH_HOLE */

	crfree(cr);

	ASSERT3S(error, <=, 0);
	return (error);
}

#ifdef HAVE_FILE_FALLOCATE
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return (zpl_fallocate_common(filp->f_path.dentry->d_inode,
	    mode, offset, len));
}
#endif /* HAVE_FILE_FALLOCATE */
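
/*
 * A minimal userspace sketch of hole punching against this hook.  Note
 * that mainline kernels require FALLOC_FL_PUNCH_HOLE to be paired with
 * FALLOC_FL_KEEP_SIZE, while the check in zpl_fallocate_common() above
 * rejects FALLOC_FL_KEEP_SIZE, so whether the request reaches
 * zfs_space() depends on the kernel version.  Illustrative only.
 */
#if 0
#define	_GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	int fd = open(argv[1], O_RDWR);

	if (fd < 0)
		return (1);

	/* Deallocate 1 MiB at offset 0 without changing the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
	    0, 1 << 20) != 0)
		perror("fallocate");

	(void) close(fd);
	return (0);
}
#endif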

/*
 * Map zfs file z_pflags (xvattr_t) to Linux file attributes.  Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	struct inode *ip = filp->f_dentry->d_inode;
	unsigned int ioctl_flags = 0;
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	ioctl_flags &= FS_FL_USER_VISIBLE;

	/*
	 * copy_to_user() returns the number of uncopied bytes, not a
	 * negative errno, so its result must not be returned directly.
	 */
	if (copy_to_user(arg, &ioctl_flags, sizeof (ioctl_flags)))
		return (-EFAULT);

	return (0);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag.  This is ugly, but the requirement that we do this is a consequence
 * of how the Linux file attribute interface was designed.  Another
 * consequence is that concurrent modification of files suffers from a TOCTOU
 * race.  Neither are things we can fix without modifying the kernel-userland
 * interface, which is outside of our jurisdiction.
 */
#define	fchange(f0, f1, b0, b1) ((((f0) & (b0)) == (b0)) != \
	(((f1) & (b1)) == (b1)))
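
/*
 * For example, with ioctl_flags as the requested flags and zfs_flags as
 * the current flags, fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL,
 * ZFS_IMMUTABLE) is true exactly when the immutable bit is set on one
 * side but not the other, i.e. when the caller is asking us to toggle it.
 */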

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = filp->f_dentry->d_inode;
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	unsigned int ioctl_flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int error;

	if (copy_from_user(&ioctl_flags, arg, sizeof (ioctl_flags)))
		return (-EFAULT);

	if ((ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL)))
		return (-EOPNOTSUPP);

	if ((ioctl_flags & ~(FS_FL_USER_MODIFIABLE)))
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EACCES);

	if (!zpl_inode_owner_or_capable(ip))
		return (-EACCES);

	xva_init(&xva);
	xoap = xva_getxoptattr(&xva);

	XVA_SET_REQ(&xva, XAT_IMMUTABLE);
	if (ioctl_flags & FS_IMMUTABLE_FL)
		xoap->xoa_immutable = B_TRUE;

	XVA_SET_REQ(&xva, XAT_APPENDONLY);
	if (ioctl_flags & FS_APPEND_FL)
		xoap->xoa_appendonly = B_TRUE;

	XVA_SET_REQ(&xva, XAT_NODUMP);
	if (ioctl_flags & FS_NODUMP_FL)
		xoap->xoa_nodump = B_TRUE;

	crhold(cr);
	error = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	crfree(cr);

	return (error);
}

static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}
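
/*
 * A minimal userspace sketch of the attribute ioctls handled above; it
 * reads the current flags and sets the append-only bit, much like
 * chattr +a.  Illustrative only; setting FS_APPEND_FL requires
 * CAP_LINUX_IMMUTABLE, per the check in zpl_ioctl_setflags().
 */
#if 0
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
	unsigned int flags;
	int fd = open(argv[1], O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) != 0)
		return (1);

	flags |= FS_APPEND_FL;
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
		perror("FS_IOC_SETFLAGS");

	(void) close(fd);
	return (0);
}
#endif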

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return (zpl_ioctl(filp, cmd, arg));
}
#endif /* CONFIG_COMPAT */

const struct address_space_operations zpl_address_space_operations = {
	.readpages = zpl_readpages,
	.readpage = zpl_readpage,
	.writepage = zpl_writepage,
	.writepages = zpl_writepages,
};

const struct file_operations zpl_file_operations = {
	.open = zpl_open,
	.release = zpl_release,
	.llseek = zpl_llseek,
	.read = zpl_read,
	.write = zpl_write,
	.mmap = zpl_mmap,
	.fsync = zpl_fsync,
#ifdef HAVE_FILE_FALLOCATE
	.fallocate = zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
	.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
#ifdef HAVE_VFS_ITERATE
	.iterate = zpl_iterate,
#else
	.readdir = zpl_readdir,
#endif
	.fsync = zpl_fsync,
	.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zpl_compat_ioctl,
#endif
};