diff --git a/mm/shmem.c b/mm/shmem.c
index 7fbe67be86fa816b13d06603c098ccf10cca4fe2..b6d517099fc40220612d4719890470dd8a857c0c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -111,9 +111,13 @@ static unsigned long shmem_default_max_blocks(void)
        return totalram_pages / 2;
 }
 
-static unsigned long shmem_default_max_inodes(void)
+static int shmem_default_max_inodes(void)
 {
-       return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
+       unsigned long ul;
+
+       ul = INT_MAX;
+       ul = min3(ul, totalram_pages - totalhigh_pages, totalram_pages / 2);
+       return ul;
 }
 #endif
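Note: the hunk above caps the default inode count at INT_MAX so it fits the int-typed IDR range introduced later in this patch, while keeping the old min(lowmem pages, totalram/2) heuristic. A minimal userspace sketch of that clamping, with deliberately huge, made-up page counts and assuming a 64-bit unsigned long:

/* Illustrative only: the page counts are invented, and min3ul() stands
 * in for the kernel's min3() macro.
 */
#include <limits.h>
#include <stdio.h>

static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
{
        unsigned long m = a < b ? a : b;
        return m < c ? m : c;
}

int main(void)
{
        unsigned long totalram_pages  = 5UL << 30;  /* hypothetical, very large box */
        unsigned long totalhigh_pages = 0;          /* hypothetical: no highmem */

        /* Both candidates exceed INT_MAX here, so the result is clamped. */
        int max_inodes = (int)min3ul((unsigned long)INT_MAX,
                                     totalram_pages - totalhigh_pages,
                                     totalram_pages / 2);
        printf("default nr_inodes = %d\n", max_inodes);
        return 0;
}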
 
@@ -493,36 +497,45 @@ next:
                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;
 
-               if (nr_to_split && split >= nr_to_split) {
-                       iput(inode);
-                       continue;
-               }
+               if (nr_to_split && split >= nr_to_split)
+                       goto leave;
 
-               page = find_lock_page(inode->i_mapping,
+               page = find_get_page(inode->i_mapping,
                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                if (!page)
                        goto drop;
 
+               /* No huge page at the end of the file: nothing to split */
                if (!PageTransHuge(page)) {
-                       unlock_page(page);
                        put_page(page);
                        goto drop;
                }
 
+               /*
+                * Leave the inode on the list if we failed to lock
+                * the page at this time.
+                *
+                * Waiting for the lock may lead to deadlock in the
+                * reclaim path.
+                */
+               if (!trylock_page(page)) {
+                       put_page(page);
+                       goto leave;
+               }
+
                ret = split_huge_page(page);
                unlock_page(page);
                put_page(page);
 
-               if (ret) {
-                       /* split failed: leave it on the list */
-                       iput(inode);
-                       continue;
-               }
+               /* If split failed leave the inode on the list */
+               if (ret)
+                       goto leave;
 
                split++;
 drop:
                list_del_init(&info->shrinklist);
                removed++;
+leave:
                iput(inode);
        }
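Note: the shrinker now takes the page lock with trylock_page() and defers the inode (goto leave) instead of sleeping on the lock, because blocking here can deadlock against the reclaim path. A rough userspace analogue of that trylock-or-defer pattern, using pthreads purely for illustration (names are invented):

#include <pthread.h>
#include <stdbool.h>

struct item {
        pthread_mutex_t lock;
        bool done;
};

/* Returns true if the item was processed, false if it was deferred
 * because the lock was busy; a later scan will pick it up again.
 */
static bool try_process(struct item *it)
{
        if (pthread_mutex_trylock(&it->lock) != 0)
                return false;           /* busy: leave it on the list */
        it->done = true;                /* ... do the real work here ... */
        pthread_mutex_unlock(&it->lock);
        return true;
}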
 
@@ -1082,6 +1095,11 @@ static void shmem_evict_inode(struct inode *inode)
 
        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
+       if (!sbinfo->idr_nouse && inode->i_ino) {
+               mutex_lock(&sbinfo->idr_lock);
+               idr_remove(&sbinfo->idr, inode->i_ino);
+               mutex_unlock(&sbinfo->idr_lock);
+       }
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
 }
@@ -2150,13 +2168,13 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
        struct inode *inode;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+       int ino;
 
        if (shmem_reserve_inode(sb))
                return NULL;
 
        inode = new_inode(sb);
        if (inode) {
-               inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_blocks = 0;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
@@ -2198,6 +2216,25 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                        mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
+
+               if (!sbinfo->idr_nouse) {
+                       /* inum 0 and 1 are unused */
+                       mutex_lock(&sbinfo->idr_lock);
+                       ino = idr_alloc(&sbinfo->idr, inode, 2, INT_MAX,
+                                       GFP_NOFS);
+                       if (ino > 0) {
+                               inode->i_ino = ino;
+                               mutex_unlock(&sbinfo->idr_lock);
+                               __insert_inode_hash(inode, inode->i_ino);
+                       } else {
+                               inode->i_ino = 0;
+                               mutex_unlock(&sbinfo->idr_lock);
+                               iput(inode);
+                               /* shmem_free_inode() will be called */
+                               inode = NULL;
+                       }
+               } else
+                       inode->i_ino = get_next_ino();
        } else
                shmem_free_inode(sb);
        return inode;
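Note: shmem_get_inode() now draws inode numbers from a per-superblock IDR (starting at 2, bounded by INT_MAX) instead of get_next_ino(), and hashes the inode under that number; shmem_evict_inode() above releases the id again. A toy sketch of that id-to-object mapping, with a fixed-size table standing in for the kernel's idr library (sizes and names are illustrative):

#include <stddef.h>

#define FIRST_ID 2                      /* ids 0 and 1 stay unused */
#define MAX_ID   1024                   /* illustrative bound only */

static void *id_table[MAX_ID];

/* Returns the allocated id and records its owner, or -1 if exhausted. */
static int id_alloc(void *owner)
{
        for (int id = FIRST_ID; id < MAX_ID; id++) {
                if (!id_table[id]) {
                        id_table[id] = owner;
                        return id;
                }
        }
        return -1;
}

/* Releases an id so it can be handed out again. */
static void id_remove(int id)
{
        if (id >= FIRST_ID && id < MAX_ID)
                id_table[id] = NULL;
}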
@@ -2227,6 +2264,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        struct page *page;
        pte_t _dst_pte, *dst_pte;
        int ret;
+       pgoff_t offset, max_off;
 
        ret = -ENOMEM;
        if (!shmem_inode_acct_block(inode, 1))
@@ -2249,7 +2287,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
                                *pagep = page;
                                shmem_inode_unacct_blocks(inode, 1);
                                /* don't free the page */
-                               return -EFAULT;
+                               return -ENOENT;
                        }
                } else {                /* mfill_zeropage_atomic */
                        clear_highpage(page);
@@ -2264,6 +2302,12 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        __SetPageSwapBacked(page);
        __SetPageUptodate(page);
 
+       ret = -EFAULT;
+       offset = linear_page_index(dst_vma, dst_addr);
+       max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+       if (unlikely(offset >= max_off))
+               goto out_release;
+
        ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false);
        if (ret)
                goto out_release;
@@ -2281,9 +2325,25 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        if (dst_vma->vm_flags & VM_WRITE)
                _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+       else {
+               /*
+                * We don't set the pte dirty if the vma has no
+                * VM_WRITE permission, so mark the page dirty or it
+                * could be freed from under us. We could do it
+                * unconditionally before unlock_page(), but doing it
+                * only if VM_WRITE is not set is faster.
+                */
+               set_page_dirty(page);
+       }
 
-       ret = -EEXIST;
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+       ret = -EFAULT;
+       max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+       if (unlikely(offset >= max_off))
+               goto out_release_uncharge_unlock;
+
+       ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_release_uncharge_unlock;
 
@@ -2301,13 +2361,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
-       unlock_page(page);
        pte_unmap_unlock(dst_pte, ptl);
+       unlock_page(page);
        ret = 0;
 out:
        return ret;
 out_release_uncharge_unlock:
        pte_unmap_unlock(dst_pte, ptl);
+       ClearPageDirty(page);
+       delete_from_page_cache(page);
 out_release_uncharge:
        mem_cgroup_cancel_charge(page, memcg, false);
 out_release:
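Note: the UFFDIO_COPY path now validates the target offset against i_size twice, once before charging the page and again under the page-table lock, so a concurrent truncate cannot install a page past EOF; on the locked-side failure the page is also dropped from the page cache. A small sketch of the bounds test itself, with illustrative types and a fixed 4 KiB page size:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* True if the faulting page offset lies at or beyond the number of
 * pages covered by i_size, i.e. the caller raced with a truncate.
 */
static bool offset_past_eof(uint64_t offset, uint64_t i_size)
{
        uint64_t max_off = DIV_ROUND_UP(i_size, PAGE_SIZE);
        return offset >= max_off;
}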
@@ -3397,8 +3459,7 @@ static struct dentry *shmem_get_parent(struct dentry *child)
 static int shmem_match(struct inode *ino, void *vfh)
 {
        __u32 *fh = vfh;
-       __u64 inum = fh[2];
-       inum = (inum << 32) | fh[1];
+       __u64 inum = fh[1];
        return ino->i_ino == inum && fh[0] == ino->i_generation;
 }
 
@@ -3409,14 +3470,11 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
        struct dentry *dentry = NULL;
        u64 inum;
 
-       if (fh_len < 3)
+       if (fh_len < 2)
                return NULL;
 
-       inum = fid->raw[2];
-       inum = (inum << 32) | fid->raw[1];
-
-       inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
-                       shmem_match, fid->raw);
+       inum = fid->raw[1];
+       inode = ilookup5(sb, inum, shmem_match, fid->raw);
        if (inode) {
                dentry = d_find_alias(inode);
                iput(inode);
@@ -3428,30 +3486,15 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
                                struct inode *parent)
 {
-       if (*len < 3) {
-               *len = 3;
+       if (*len < 2) {
+               *len = 2;
                return FILEID_INVALID;
        }
 
-       if (inode_unhashed(inode)) {
-               /* Unfortunately insert_inode_hash is not idempotent,
-                * so as we hash inodes here rather than at creation
-                * time, we need a lock to ensure we only try
-                * to do it once
-                */
-               static DEFINE_SPINLOCK(lock);
-               spin_lock(&lock);
-               if (inode_unhashed(inode))
-                       __insert_inode_hash(inode,
-                                           inode->i_ino + inode->i_generation);
-               spin_unlock(&lock);
-       }
-
        fh[0] = inode->i_generation;
        fh[1] = inode->i_ino;
-       fh[2] = ((__u64)inode->i_ino) >> 32;
 
-       *len = 3;
+       *len = 2;
        return 1;
 }
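Note: because IDR-allocated inode numbers are guaranteed to fit in 32 bits, the exported NFS file handle shrinks from three words to two and the lazy re-hashing in shmem_encode_fh() is no longer needed. An illustrative userspace sketch of the two-word layout (types and helpers are invented for the example):

#include <stdint.h>

enum { FH_WORDS = 2 };

/* fh[0] holds the generation, fh[1] the 32-bit inode number. */
static int encode_fh(uint32_t *fh, int len, uint32_t generation, uint32_t ino)
{
        if (len < FH_WORDS)
                return -1;              /* caller must retry with a larger buffer */
        fh[0] = generation;
        fh[1] = ino;
        return FH_WORDS;
}

static int fh_matches(const uint32_t *fh, uint32_t ino, uint32_t generation)
{
        return fh[1] == ino && fh[0] == generation;
}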
 
@@ -3515,7 +3558,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
                        sbinfo->max_inodes = memparse(value, &rest);
-                       if (*rest)
+                       if (*rest || sbinfo->max_inodes < 2)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
                        if (remount)
@@ -3580,7 +3623,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
 {
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct shmem_sb_info config = *sbinfo;
-       unsigned long inodes;
+       int inodes;
        int error = -EINVAL;
 
        config.mpol = NULL;
@@ -3629,7 +3672,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",size=%luk",
                        sbinfo->max_blocks << (PAGE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
-               seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
+               seq_printf(seq, ",nr_inodes=%d", sbinfo->max_inodes);
        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
                seq_printf(seq, ",mode=%03ho", sbinfo->mode);
        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
@@ -3747,6 +3790,8 @@ static void shmem_put_super(struct super_block *sb)
 {
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
 
+       if (!sbinfo->idr_nouse)
+               idr_destroy(&sbinfo->idr);
        percpu_counter_destroy(&sbinfo->used_blocks);
        mpol_put(sbinfo->mpol);
        kfree(sbinfo);
@@ -3765,6 +3810,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
        if (!sbinfo)
                return -ENOMEM;
 
+       mutex_init(&sbinfo->idr_lock);
+       idr_init(&sbinfo->idr);
        sbinfo->mode = S_IRWXUGO | S_ISVTX;
        sbinfo->uid = current_fsuid();
        sbinfo->gid = current_fsgid();
@@ -3872,6 +3919,15 @@ static void shmem_destroy_inodecache(void)
        kmem_cache_destroy(shmem_inode_cachep);
 }
 
+static __init void shmem_no_idr(struct super_block *sb)
+{
+       struct shmem_sb_info *sbinfo;
+
+       sbinfo = SHMEM_SB(sb);
+       sbinfo->idr_nouse = true;
+       idr_destroy(&sbinfo->idr);
+}
+
 static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
@@ -4002,6 +4058,7 @@ int __init shmem_init(void)
                pr_err("Could not kern_mount tmpfs\n");
                goto out1;
        }
+       shmem_no_idr(shm_mnt->mnt_sb);
 
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
        if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)