diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index caa4147cda47b599e84a301f0acc13aa006f519c..63a6b6332682b8865576b7554c63e23d3e2841ae 100644 (file)
@@ -863,7 +863,6 @@ static void ext4_put_super(struct super_block *sb)
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
        percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
-       brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
                kfree(sbi->s_qf_names[i]);
@@ -895,6 +894,7 @@ static void ext4_put_super(struct super_block *sb)
        }
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
+       brelse(sbi->s_sbh);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
@@ -1114,37 +1114,55 @@ static int ext4_prepare_context(struct inode *inode)
 static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                                                        void *fs_data)
 {
-       handle_t *handle;
-       int res, res2;
+       handle_t *handle = fs_data;
+       int res, res2, retries = 0;
+
+       /*
+        * If a journal handle was specified, then the encryption context is
+        * being set on a new inode via inheritance and is part of a larger
+        * transaction to create the inode.  Otherwise the encryption context is
+        * being set on an existing inode in its own transaction.  Only in the
+        * latter case should the "retry on ENOSPC" logic be used.
+        */
 
-       /* fs_data is null when internally used. */
-       if (fs_data) {
-               res  = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
-                               EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
-                               len, 0);
+       if (handle) {
+               res = ext4_xattr_set_handle(handle, inode,
+                                           EXT4_XATTR_INDEX_ENCRYPTION,
+                                           EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+                                           ctx, len, 0);
                if (!res) {
                        ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
                        ext4_clear_inode_state(inode,
                                        EXT4_STATE_MAY_INLINE_DATA);
+                       /*
+                        * Update inode->i_flags - e.g. S_DAX may get disabled
+                        */
+                       ext4_set_inode_flags(inode);
                }
                return res;
        }
 
+retry:
        handle = ext4_journal_start(inode, EXT4_HT_MISC,
                        ext4_jbd2_credits_xattr(inode));
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
-       res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
-                       EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
-                       len, 0);
+       res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
+                                   EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+                                   ctx, len, 0);
        if (!res) {
                ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+               /* Update inode->i_flags - e.g. S_DAX may get disabled */
+               ext4_set_inode_flags(inode);
                res = ext4_mark_inode_dirty(handle, inode);
                if (res)
                        EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
        }
        res2 = ext4_journal_stop(handle);
+
+       if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
        if (!res)
                res = res2;
        return res;
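
(A note on the retry loop added above, not wording from the patch: the handle from a failed attempt is always released with ext4_journal_stop() before ext4_should_retry_alloc() is consulted, and a retry restarts the whole transaction from ext4_journal_start(). As I understand the helper, it forces a journal commit, up to a small retry limit, so that blocks pinned by the committing transaction get a chance to become allocatable again. Stripped of the xattr details, the shape is roughly:

	int retries = 0, res;

	do {
		res = do_one_txn();	/* ext4_journal_start .. ext4_journal_stop */
	} while (res == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));

where do_one_txn() is an illustrative stand-in, not an ext4 function.)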
@@ -1187,7 +1205,7 @@ static int ext4_release_dquot(struct dquot *dquot);
 static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-                        struct path *path);
+                        const struct path *path);
 static int ext4_quota_off(struct super_block *sb, int type);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
@@ -1883,12 +1901,6 @@ static int parse_options(char *options, struct super_block *sb,
                        return 0;
                }
        }
-       if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
-           test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
-               ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
-                        "in data=ordered mode");
-               return 0;
-       }
        return 1;
 }
 
@@ -2330,7 +2342,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                                struct ext4_super_block *es)
 {
        unsigned int s_flags = sb->s_flags;
-       int nr_orphans = 0, nr_truncates = 0;
+       int ret, nr_orphans = 0, nr_truncates = 0;
 #ifdef CONFIG_QUOTA
        int i;
 #endif
@@ -2412,7 +2424,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
                                  inode->i_ino, inode->i_size);
                        inode_lock(inode);
                        truncate_inode_pages(inode->i_mapping, inode->i_size);
-                       ext4_truncate(inode);
+                       ret = ext4_truncate(inode);
+                       if (ret)
+                               ext4_std_error(inode->i_sb, ret);
                        inode_unlock(inode);
                        nr_truncates++;
                } else {
@@ -3193,10 +3207,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
                        ext4_set_bit(s++, buf);
                        count++;
                }
-               for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
-                       ext4_set_bit(EXT4_B2C(sbi, s++), buf);
-                       count++;
+               j = ext4_bg_num_gdb(sb, grp);
+               if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+                       ext4_error(sb, "Invalid number of block group "
+                                  "descriptor blocks: %d", j);
+                       j = EXT4_BLOCKS_PER_GROUP(sb) - s;
                }
+               count += j;
+               for (; j > 0; j--)
+                       ext4_set_bit(EXT4_B2C(sbi, s++), buf);
        }
        if (!count)
                return 0;
@@ -3301,7 +3320,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        char *orig_data = kstrdup(data, GFP_KERNEL);
        struct buffer_head *bh;
        struct ext4_super_block *es = NULL;
-       struct ext4_sb_info *sbi;
+       struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
        ext4_fsblk_t block;
        ext4_fsblk_t sb_block = get_sb_block(&data);
        ext4_fsblk_t logical_sb_block;
@@ -3320,16 +3339,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
        ext4_group_t first_not_zeroed;
 
-       sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-       if (!sbi)
-               goto out_free_orig;
+       if ((data && !orig_data) || !sbi)
+               goto out_free_base;
 
        sbi->s_blockgroup_lock =
                kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-       if (!sbi->s_blockgroup_lock) {
-               kfree(sbi);
-               goto out_free_orig;
-       }
+       if (!sbi->s_blockgroup_lock)
+               goto out_free_base;
+
        sb->s_fs_info = sbi;
        sbi->s_sb = sb;
        sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
@@ -3475,11 +3492,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
         */
        sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
-       if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
-                          &journal_devnum, &journal_ioprio, 0)) {
-               ext4_msg(sb, KERN_WARNING,
-                        "failed to parse options in superblock: %s",
-                        sbi->s_es->s_mount_opts);
+       if (sbi->s_es->s_mount_opts[0]) {
+               char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+                                             sizeof(sbi->s_es->s_mount_opts),
+                                             GFP_KERNEL);
+               if (!s_mount_opts)
+                       goto failed_mount;
+               if (!parse_options(s_mount_opts, sb, &journal_devnum,
+                                  &journal_ioprio, 0)) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "failed to parse options in superblock: %s",
+                                s_mount_opts);
+               }
+               kfree(s_mount_opts);
        }
        sbi->s_def_mount_opt = sbi->s_mount_opt;
        if (!parse_options((char *) data, sb, &journal_devnum,
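
(Context for the kstrndup() above, my reading rather than the patch's own wording: s_mount_opts is a fixed-size char array in the on-disk superblock with no guaranteed NUL terminator, and parse_options() carves its argument up in place, so the options are now parsed from a bounded, writable copy instead of from the mapped superblock buffer. A tiny illustration of the bounding, with an assumed field size:

	char field[16] = "noatime,nobarrie";	/* exactly 16 chars, no '\0' stored */
	char *copy = kstrndup(field, sizeof(field), GFP_KERNEL);
	/* copy, if the allocation succeeds, is 17 bytes: the 16 data bytes
	 * plus the terminating '\0' the on-disk field never had. */
	kfree(copy);

kstrndup() reads at most sizeof(field) bytes, so even a completely full field is safe to hand to the parser.)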
@@ -3505,6 +3530,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                 "both data=journal and dax");
                        goto failed_mount;
                }
+               if (ext4_has_feature_encrypt(sb)) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "encrypted files will use data=ordered "
+                                "instead of data journaling mode");
+               }
                if (test_opt(sb, DELALLOC))
                        clear_opt(sb, DELALLOC);
        } else {
@@ -3660,12 +3690,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
        sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
        sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-       if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
-               goto cantfind_ext4;
 
        sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
        if (sbi->s_inodes_per_block == 0)
                goto cantfind_ext4;
+       if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+           sbi->s_inodes_per_group > blocksize * 8) {
+               ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu",
+                        sbi->s_inodes_per_group);
+               goto failed_mount;
+       }
        sbi->s_itb_per_group = sbi->s_inodes_per_group /
                                        sbi->s_inodes_per_block;
        sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
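
(Worked example of the new bounds, using illustrative numbers that are not from the patch: with a 4096-byte block and 256-byte on-disk inodes, s_inodes_per_block is 4096 / 256 = 16, and the single inode bitmap block holds 4096 * 8 = 32768 bits, so any on-disk s_inodes_per_group outside [16, 32768] is rejected here. The lower bound also covers the EXT4_INODES_PER_GROUP(sb) == 0 test that was dropped above, and it keeps the s_itb_per_group division just below from rounding down to zero inode-table blocks per group.)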
@@ -3748,13 +3782,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
        sbi->s_cluster_ratio = clustersize / blocksize;
 
-       if (sbi->s_inodes_per_group > blocksize * 8) {
-               ext4_msg(sb, KERN_ERR,
-                      "#inodes per group too big: %lu",
-                      sbi->s_inodes_per_group);
-               goto failed_mount;
-       }
-
        /* Do we have standard group size of clustersize * 8 blocks ? */
        if (sbi->s_blocks_per_group == clustersize << 3)
                set_opt2(sb, STD_GROUP_SIZE);
@@ -3814,6 +3841,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
        db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
                   EXT4_DESC_PER_BLOCK(sb);
+       if (ext4_has_feature_meta_bg(sb)) {
+               if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+                       ext4_msg(sb, KERN_WARNING,
+                                "first meta block group too large: %u "
+                                "(group descriptor block count %u)",
+                                le32_to_cpu(es->s_first_meta_bg), db_count);
+                       goto failed_mount;
+               }
+       }
        sbi->s_group_desc = ext4_kvmalloc(db_count *
                                          sizeof(struct buffer_head *),
                                          GFP_KERNEL);
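
(For context on the new check, as I read it rather than as the patch states it: db_count just above is the number of group-descriptor blocks, i.e. s_groups_count divided by EXT4_DESC_PER_BLOCK(sb) and rounded up. Assuming 4 KiB blocks and 64-byte descriptors, that is 64 descriptors per block, so a filesystem with 10,000 block groups has db_count = 157; an on-disk s_first_meta_bg of 157 or more cannot name a real descriptor block, and without this check a crafted superblock would steer the meta_bg descriptor lookups outside the range the mount code expects.)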
@@ -3967,6 +4003,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        default:
                break;
        }
+
+       if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
+           test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+               ext4_msg(sb, KERN_ERR, "can't mount with "
+                       "journal_async_commit in data=ordered mode");
+               goto failed_mount_wq;
+       }
+
        set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
 
        sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
@@ -4160,7 +4204,9 @@ no_journal:
 
        if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
                ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-                        "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+                        "Opts: %.*s%s%s", descr,
+                        (int) sizeof(sbi->s_es->s_mount_opts),
+                        sbi->s_es->s_mount_opts,
                         *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
        if (es->s_error_count)
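
(The "%.*s" above addresses the same non-termination issue as the earlier kstrndup() change: s_mount_opts may legitimately fill its array with no trailing '\0', so a plain "%s" could read past the field. A minimal userspace-style illustration of the precision bound, with assumed values:

	#include <stdio.h>

	int main(void)
	{
		char opts[8] = "barrier!";	/* 8 chars, no '\0' stored */

		/* precision bound: printf reads at most sizeof(opts) bytes */
		printf("%.*s\n", (int) sizeof(opts), opts);
		return 0;
	}
)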
@@ -4239,8 +4285,8 @@ failed_mount:
 out_fail:
        sb->s_fs_info = NULL;
        kfree(sbi->s_blockgroup_lock);
+out_free_base:
        kfree(sbi);
-out_free_orig:
        kfree(orig_data);
        return err ? err : ret;
 }
@@ -4550,7 +4596,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                                &EXT4_SB(sb)->s_freeinodes_counter));
        BUFFER_TRACE(sbh, "marking dirty");
        ext4_superblock_csum_set(sb);
-       lock_buffer(sbh);
+       if (sync)
+               lock_buffer(sbh);
        if (buffer_write_io_error(sbh)) {
                /*
                 * Oh, dear.  A previous attempt to write the
@@ -4566,8 +4613,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                set_buffer_uptodate(sbh);
        }
        mark_buffer_dirty(sbh);
-       unlock_buffer(sbh);
        if (sync) {
+               unlock_buffer(sbh);
                error = __sync_dirty_buffer(sbh,
                        test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
                if (error)
@@ -4857,6 +4904,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                        err = -EINVAL;
                        goto restore_opts;
                }
+       } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
+               if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+                       ext4_msg(sb, KERN_ERR, "can't mount with "
+                               "journal_async_commit in data=ordered mode");
+                       err = -EINVAL;
+                       goto restore_opts;
+               }
        }
 
        if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
@@ -5239,7 +5293,7 @@ static void lockdep_set_quota_inode(struct inode *inode, int subclass)
  * Standard function to be called on quota_on
  */
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-                        struct path *path)
+                        const struct path *path)
 {
        int err;
 
@@ -5366,7 +5420,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
        handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
        if (IS_ERR(handle))
                goto out;
-       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       inode->i_mtime = inode->i_ctime = current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
        ext4_journal_stop(handle);
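
(On the CURRENT_TIME -> current_time() change above: the new helper takes the inode so it can honour the timestamp granularity the filesystem declared in sb->s_time_gran, which the old macro ignored, and it is part of the groundwork for wider timestamp changes. A rough sketch of the behaviour, simplified and assuming the 4.9-era struct timespec interfaces rather than the exact implementation:

	static inline struct timespec sketch_current_time(struct inode *inode)
	{
		/* "now", truncated to the filesystem's declared granularity */
		return timespec_trunc(current_kernel_time(),
				      inode->i_sb->s_time_gran);
	}
)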