]> git.proxmox.com Git - mirror_zfs.git/blobdiff - module/zfs/zfs_vfsops.c
Fix typo/etc in module/zfs/zfs_ctldir.c
[mirror_zfs.git] / module / zfs / zfs_vfsops.c
index a477c8669b543e0ed0cd8363958fb1110d616b33..781708ba96a28171c0e5d5df38960af0f5790773 100644 (file)
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  */
 
 /* Portions Copyright 2010 Robert Milkowski */
@@ -56,6 +56,7 @@
 #include <sys/dmu_objset.h>
 #include <sys/spa_boot.h>
 #include <sys/zpl.h>
+#include <linux/vfs_compat.h>
 #include "zfs_comutil.h"
 
 enum {
@@ -249,7 +250,7 @@ zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
 boolean_t
 zfs_is_readonly(zfsvfs_t *zfsvfs)
 {
-       return (!!(zfsvfs->z_sb->s_flags & MS_RDONLY));
+       return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
 }
 
 /*ARGSUSED*/
@@ -258,13 +259,6 @@ zfs_sync(struct super_block *sb, int wait, cred_t *cr)
 {
        zfsvfs_t *zfsvfs = sb->s_fs_info;
 
-       /*
-        * Data integrity is job one.  We don't want a compromised kernel
-        * writing to the storage pool, so we never sync during panic.
-        */
-       if (unlikely(oops_in_progress))
-               return (0);
-
        /*
         * Semantically, the only requirement is that the sync be initiated.
         * The DMU syncs out txgs frequently, so there's nothing to do.
@@ -343,15 +337,15 @@ acltype_changed_cb(void *arg, uint64_t newval)
        switch (newval) {
        case ZFS_ACLTYPE_OFF:
                zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
-               zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
+               zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
                break;
        case ZFS_ACLTYPE_POSIXACL:
 #ifdef CONFIG_FS_POSIX_ACL
                zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIXACL;
-               zfsvfs->z_sb->s_flags |= MS_POSIXACL;
+               zfsvfs->z_sb->s_flags |= SB_POSIXACL;
 #else
                zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
-               zfsvfs->z_sb->s_flags &= ~MS_POSIXACL;
+               zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
 #endif /* CONFIG_FS_POSIX_ACL */
                break;
        default:
@@ -380,9 +374,9 @@ readonly_changed_cb(void *arg, uint64_t newval)
                return;
 
        if (newval)
-               sb->s_flags |= MS_RDONLY;
+               sb->s_flags |= SB_RDONLY;
        else
-               sb->s_flags &= ~MS_RDONLY;
+               sb->s_flags &= ~SB_RDONLY;
 }
 
 static void
@@ -410,9 +404,9 @@ nbmand_changed_cb(void *arg, uint64_t newval)
                return;
 
        if (newval == TRUE)
-               sb->s_flags |= MS_MANDLOCK;
+               sb->s_flags |= SB_MANDLOCK;
        else
-               sb->s_flags &= ~MS_MANDLOCK;
+               sb->s_flags &= ~SB_MANDLOCK;
 }
 
 static void
@@ -1041,14 +1035,6 @@ zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
                        zfsvfs->z_xattr_sa = B_TRUE;
        }
 
-       error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
-           &zfsvfs->z_attr_table);
-       if (error != 0)
-               return (error);
-
-       if (zfsvfs->z_version >= ZPL_VERSION_SA)
-               sa_register_update_callback(os, zfs_sa_upgrade);
-
        error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
            &zfsvfs->z_root);
        if (error != 0)
@@ -1122,6 +1108,14 @@ zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
        else if (error != 0)
                return (error);
 
+       error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
+           &zfsvfs->z_attr_table);
+       if (error != 0)
+               return (error);
+
+       if (zfsvfs->z_version >= ZPL_VERSION_SA)
+               sa_register_update_callback(os, zfs_sa_upgrade);
+
        return (0);
 }
 
@@ -1148,6 +1142,11 @@ zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
        return (error);
 }
 
+
+/*
+ * Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
+ * on a failure.  Do not pass in a statically allocated zfsvfs.
+ */
 int
 zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
 {
@@ -1180,10 +1179,14 @@ zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
        error = zfsvfs_init(zfsvfs, os);
        if (error != 0) {
                *zfvp = NULL;
-               kmem_free(zfsvfs, sizeof (zfsvfs_t));
+               zfsvfs_free(zfsvfs);
                return (error);
        }
 
+       zfsvfs->z_drain_task = TASKQID_INVALID;
+       zfsvfs->z_draining = B_FALSE;
+       zfsvfs->z_drain_cancel = B_TRUE;
+
        *zfvp = zfsvfs;
        return (0);
 }
@@ -1206,14 +1209,27 @@ zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
         * operations out since we closed the ZIL.
         */
        if (mounting) {
+               ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
+               dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
+
                /*
                 * During replay we remove the read only flag to
                 * allow replays to succeed.
                 */
-               if (readonly != 0)
+               if (readonly != 0) {
                        readonly_changed_cb(zfsvfs, B_FALSE);
-               else
+               } else {
+                       zap_stats_t zs;
+                       if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
+                           &zs) == 0) {
+                               dataset_kstats_update_nunlinks_kstat(
+                                   &zfsvfs->z_kstat, zs.zs_num_entries);
+                       }
+                       dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
+                           "num_entries in unlinked set: %llu",
+                           zs.zs_num_entries);
                        zfs_unlinked_drain(zfsvfs);
+               }
 
                /*
                 * Parse and replay the intent log.
@@ -1288,6 +1304,7 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
        vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
        vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
        zfsvfs_vfs_free(zfsvfs->z_vfs);
+       dataset_kstats_destroy(&zfsvfs->z_kstat);
        kmem_free(zfsvfs, sizeof (zfsvfs_t));
 }
 
@@ -1418,8 +1435,6 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
 {
        zfsvfs_t *zfsvfs = dentry->d_sb->s_fs_info;
        uint64_t refdbytes, availbytes, usedobjs, availobjs;
-       uint64_t fsid;
-       uint32_t bshift;
        int err = 0;
 
        ZFS_ENTER(zfsvfs);
@@ -1427,7 +1442,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
        dmu_objset_space(zfsvfs->z_os,
            &refdbytes, &availbytes, &usedobjs, &availobjs);
 
-       fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
+       uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
        /*
         * The underlying storage pool actually uses multiple block
         * size.  Under Solaris frsize (fragment size) is reported as
@@ -1439,7 +1454,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
         */
        statp->f_frsize = zfsvfs->z_max_blksz;
        statp->f_bsize = zfsvfs->z_max_blksz;
-       bshift = fls(statp->f_bsize) - 1;
+       uint32_t bshift = fls(statp->f_bsize) - 1;
 
        /*
         * The following report "total" blocks of various kinds in
@@ -1447,6 +1462,8 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
         * "preferred" size.
         */
 
+       /* Round up so we never have a filesystem using 0 blocks. */
+       refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
        statp->f_blocks = (refdbytes + availbytes) >> bshift;
        statp->f_bfree = availbytes >> bshift;
        statp->f_bavail = statp->f_bfree; /* no root reservation */
@@ -1456,7 +1473,7 @@ zfs_statvfs(struct dentry *dentry, struct kstatfs *statp)
         * static metadata.  ZFS doesn't preallocate files, so the best
         * we can do is report the max that could possibly fit in f_files,
         * and that minus the number actually used in f_ffree.
-        * For f_ffree, report the smaller of the number of object available
+        * For f_ffree, report the smaller of the number of objects available
         * and the number of blocks (each object will take at least a block).
         */
        statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
@@ -1635,6 +1652,8 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 {
        znode_t *zp;
 
+       zfs_unlinked_drain_stop_wait(zfsvfs);
+
        /*
         * If someone has not already unmounted this file system,
         * drain the iput_taskq to ensure all active references to the
@@ -1741,10 +1760,10 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
        zfs_unregister_callbacks(zfsvfs);
 
        /*
-        * Evict cached data
+        * Evict cached data. We must write out any dirty data before
+        * disowning the dataset.
         */
-       if (dsl_dataset_is_dirty(dmu_objset_ds(zfsvfs->z_os)) &&
-           !zfs_is_readonly(zfsvfs))
+       if (!zfs_is_readonly(zfsvfs))
                txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        dmu_objset_evict_dbufs(zfsvfs->z_os);
 
@@ -1886,6 +1905,7 @@ zfs_preumount(struct super_block *sb)
 
        /* zfsvfs is NULL when zfs_domount fails during mount */
        if (zfsvfs) {
+               zfs_unlinked_drain_stop_wait(zfsvfs);
                zfsctl_destroy(sb->s_fs_info);
                /*
                 * Wait for iput_async before entering evict_inodes in
@@ -1957,8 +1977,8 @@ zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
        int error;
 
        if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
-           !(*flags & MS_RDONLY)) {
-               *flags |= MS_RDONLY;
+           !(*flags & SB_RDONLY)) {
+               *flags |= SB_RDONLY;
                return (EROFS);
        }
 
@@ -1966,6 +1986,9 @@ zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
        if (error)
                return (error);
 
+       if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
+               txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
+
        zfs_unregister_callbacks(zfsvfs);
        zfsvfs_vfs_free(zfsvfs->z_vfs);
 
@@ -2158,6 +2181,15 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
        }
        mutex_exit(&zfsvfs->z_znodes_lock);
 
+       if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
+               /*
+                * zfs_suspend_fs() could have interrupted freeing
+                * of dnodes. We need to restart this freeing so
+                * that we don't "leak" the space.
+                */
+               zfs_unlinked_drain(zfsvfs);
+       }
+
 bail:
        /* release the VFS ops */
        rw_exit(&zfsvfs->z_teardown_inactive_lock);