*/
+#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
cred_t *cr = CRED();
int error;
+ error = generic_file_open(ip, filp);
+ if (error)
+ return (error);
+
crhold(cr);
error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
crfree(cr);
ASSERT3S(error, <=, 0);
- if (error)
- return (error);
-
- return generic_file_open(ip, filp);
+ return (error);
}
static int
}
static int
-zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
+zpl_iterate(struct file *filp, struct dir_context *ctx)
{
struct dentry *dentry = filp->f_path.dentry;
cred_t *cr = CRED();
int error;
crhold(cr);
- error = -zfs_readdir(dentry->d_inode, dirent, filldir,
- &filp->f_pos, cr);
+ error = -zfs_readdir(dentry->d_inode, ctx, cr);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
+#if !defined(HAVE_VFS_ITERATE)
+/*
+ * Compatibility shim for kernels without the vfs iterate() API: wrap
+ * the legacy readdir() entry point around zpl_iterate() by packing the
+ * caller's filldir callback and current position into a dir_context,
+ * then propagating the updated position back to the file afterwards.
+ */
+static int
+zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+	struct dir_context ctx = DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
+	int error;
+
+	error = zpl_iterate(filp, &ctx);
+	filp->f_pos = ctx.pos;
+
+	return (error);
+}
+#endif /* !HAVE_VFS_ITERATE */
+
#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
* Linux 2.6.x - 2.6.34 API,
ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t pos,
- uio_seg_t segment, int flags, cred_t *cr)
+ uio_seg_t segment, int flags, cred_t *cr)
{
int error;
+ ssize_t read;
struct iovec iov;
uio_t uio;
if (error < 0)
return (error);
- return (len - uio.uio_resid);
+ read = len - uio.uio_resid;
+ task_io_account_read(read);
+
+ return (read);
}
static ssize_t
uio_seg_t segment, int flags, cred_t *cr)
{
int error;
+ ssize_t wrote;
struct iovec iov;
uio_t uio;
if (error < 0)
return (error);
- return (len - uio.uio_resid);
+ wrote = len - uio.uio_resid;
+ task_io_account_write(wrote);
+
+ return (wrote);
}
static ssize_t
}
#endif /* SEEK_HOLE && SEEK_DATA */
- return generic_file_llseek(filp, offset, whence);
+ return (generic_file_llseek(filp, offset, whence));
}
/*
}
unlock_page(pp);
- return error;
+ return (error);
}
/*
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- return write_cache_pages(mapping, wbc, zpl_putpage, mapping);
+ znode_t *zp = ITOZ(mapping->host);
+ zfs_sb_t *zsb = ITOZSB(mapping->host);
+ enum writeback_sync_modes sync_mode;
+ int result;
+
+ ZFS_ENTER(zsb);
+ if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ wbc->sync_mode = WB_SYNC_ALL;
+ ZFS_EXIT(zsb);
+ sync_mode = wbc->sync_mode;
+
+ /*
+ * We don't want to run write_cache_pages() in SYNC mode here, because
+ * that would make putpage() wait for a single page to be committed to
+ * disk every single time, resulting in atrocious performance. Instead
+ * we run it once in non-SYNC mode so that the ZIL gets all the data,
+ * and then we commit it all in one go.
+ */
+ wbc->sync_mode = WB_SYNC_NONE;
+ result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
+ if (sync_mode != wbc->sync_mode) {
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
+ zil_commit(zsb->z_log, zp->z_id);
+ ZFS_EXIT(zsb);
+
+ /*
+ * We need to call write_cache_pages() again (we can't just
+ * return after the commit) because the previous call in
+ * non-SYNC mode does not guarantee that we got all the dirty
+ * pages (see the implementation of write_cache_pages() for
+ * details). That being said, this is a no-op in most cases.
+ */
+ wbc->sync_mode = sync_mode;
+ result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
+ }
+ return (result);
}
/*
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
- return zpl_putpage(pp, wbc, pp->mapping);
+ if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
+ wbc->sync_mode = WB_SYNC_ALL;
+
+ return (zpl_putpage(pp, wbc, pp->mapping));
}
/*
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- return zpl_ioctl(filp, cmd, arg);
+ return (zpl_ioctl(filp, cmd, arg));
}
#endif /* CONFIG_COMPAT */
.readpages = zpl_readpages,
.readpage = zpl_readpage,
.writepage = zpl_writepage,
- .writepages = zpl_writepages,
+ .writepages = zpl_writepages,
};
const struct file_operations zpl_file_operations = {
.mmap = zpl_mmap,
.fsync = zpl_fsync,
#ifdef HAVE_FILE_FALLOCATE
- .fallocate = zpl_fallocate,
+ .fallocate = zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
- .unlocked_ioctl = zpl_ioctl,
+ .unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = zpl_compat_ioctl,
+ .compat_ioctl = zpl_compat_ioctl,
#endif
};
const struct file_operations zpl_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
+#ifdef HAVE_VFS_ITERATE
+ .iterate = zpl_iterate,
+#else
.readdir = zpl_readdir,
+#endif
.fsync = zpl_fsync,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT