diff --git a/fs/exec.c b/fs/exec.c
index 5b6383208379ec3930423809b02b803e4f0234b4..8cf76e2a0b83e2777f01f51a0a89efd68e4b7127 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -205,7 +205,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
 #ifdef CONFIG_STACK_GROWSUP
        if (write) {
-               ret = expand_downwards(bprm->vma, pos, 0);
+               ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
@@ -225,14 +225,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 
        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+               unsigned long ptr_size;
                struct rlimit *rlim;
 
                /*
-                * GRWOSUP doesn't really have any gap at this stage because we grow
-                * the stack down now. See the expand_downwards above.
+                * Since the stack will hold pointers to the strings, we
+                * must account for them as well.
+                *
+                * The size calculation is the entire vma while each arg page is
+                * built, so each time we get here it's calculating how far it
+                * is currently (rather than each call being just the newly
+                * added size from the arg page).  As a result, we need to
+                * always add the entire size of the pointers, so that on the
+                * last call to get_arg_page() we'll actually have the entire
+                * correct size.
                 */
-               if (!IS_ENABLED(CONFIG_STACK_GROWSUP))
-                       size -= stack_guard_gap;
+               ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+               if (ptr_size > ULONG_MAX - size)
+                       goto fail;
+               size += ptr_size;
+
                acct_arg_size(bprm, size / PAGE_SIZE);
 
                /*
@@ -250,13 +262,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                 *    to work from.
                 */
                rlim = current->signal->rlim;
-               if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
-                       put_page(page);
-                       return NULL;
-               }
+               if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+                       goto fail;
        }
 
        return page;
+
+fail:
+       put_page(page);
+       return NULL;
 }
 
 static void put_arg_page(struct page *page)
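
The comment added above explains why the argv/envp pointer arrays must be counted against the stack budget on every call to get_arg_page(). The following is a minimal userspace sketch of that overflow-safe accounting; the argc/envc values, the copied-string size and the RLIMIT_STACK/4 cap are invented for illustration, and nothing below is kernel API.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs standing in for bprm->argc, bprm->envc and
	 * the current size of the argument VMA. */
	unsigned long argc = 3, envc = 25;
	unsigned long size = 64 * 1024;               /* strings copied so far */
	unsigned long limit = 8UL * 1024 * 1024 / 4;  /* RLIMIT_STACK / 4 */
	unsigned long ptr_size;

	/* Account for the argv[]/envp[] pointer arrays, guarding against
	 * wraparound the same way the ptr_size check in the hunk above does. */
	ptr_size = (argc + envc) * sizeof(void *);
	if (ptr_size > ULONG_MAX - size) {
		fprintf(stderr, "size overflow\n");
		return 1;
	}
	size += ptr_size;

	if (size > limit) {
		fprintf(stderr, "argv/envp would exceed RLIMIT_STACK / 4\n");
		return 1;
	}

	printf("accounted size: %lu bytes\n", size);
	return 0;
}

As in the hunk above, the full pointer-array size is re-added to the whole-VMA size on each pass rather than accumulated incrementally, so the value compared against the limit on the final call to get_arg_page() already includes every pointer.
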
@@ -1443,6 +1457,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
 {
        struct task_struct *p = current, *t;
        unsigned n_fs;
+       bool fs_recheck;
 
        if (p->ptrace) {
                if (ptracer_capable(p, current_user_ns()))
@@ -1458,6 +1473,8 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
        if (task_no_new_privs(current))
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
 
+recheck:
+       fs_recheck = false;
        t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
@@ -1465,12 +1482,18 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
        while_each_thread(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
+               if (t->flags & (PF_EXITING | PF_FORKNOEXEC))
+                       fs_recheck = true;
        }
        rcu_read_unlock();
 
-       if (p->fs->users > n_fs)
+       if (p->fs->users > n_fs) {
+               if (fs_recheck) {
+                       spin_unlock(&p->fs->lock);
+                       goto recheck;
+               }
                bprm->unsafe |= LSM_UNSAFE_SHARE;
-       else
+       } else
                p->fs->in_exec = 1;
        spin_unlock(&p->fs->lock);
 }
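
The check_unsafe_exec() hunks above recount the fs_struct sharers whenever the thread walk saw a sibling flagged PF_EXITING or PF_FORKNOEXEC: such a thread can make p->fs->users transiently exceed n_fs, and retrying with the lock dropped gives it a chance to settle before the exec is marked LSM_UNSAFE_SHARE. Below is a minimal userspace sketch of that snapshot-and-recheck pattern; struct fake_thread, struct fake_fs and fs_looks_shared() are invented names, and the fs->lock handling and RCU walk are omitted, so this illustrates only the retry control flow.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for fs_struct and the thread list;
 * locking is omitted and the data is static, so this only demonstrates
 * the shape of the recheck loop. */
struct fake_thread {
	bool shares_fs;   /* t->fs == p->fs */
	bool in_flux;     /* PF_EXITING or PF_FORKNOEXEC set */
};

struct fake_fs {
	int users;        /* p->fs->users */
};

/* Returns true if the fs_struct must be treated as shared (the
 * LSM_UNSAFE_SHARE case), recounting while the snapshot may have raced
 * with a thread in a transient state. */
static bool fs_looks_shared(const struct fake_fs *fs,
			    const struct fake_thread *threads, int nthreads)
{
	bool recheck;
	int n_fs, i;

	do {
		recheck = false;
		n_fs = 1;                       /* the exec'ing task itself */
		for (i = 0; i < nthreads; i++) {
			if (threads[i].shares_fs)
				n_fs++;
			if (threads[i].in_flux)
				recheck = true;
		}
		/* In the kernel the thread state changes between passes
		 * (the transient thread settles); with this static example
		 * data the condition converges on the first pass. */
	} while (fs->users > n_fs && recheck);

	return fs->users > n_fs;
}

int main(void)
{
	const struct fake_thread peers[] = {
		{ .shares_fs = true, .in_flux = false },
		{ .shares_fs = true, .in_flux = true  },
	};
	const struct fake_fs fs = { .users = 3 };

	printf("fs shared: %s\n",
	       fs_looks_shared(&fs, peers, 2) ? "yes" : "no");
	return 0;
}

In the patch each retry re-takes p->fs->lock and rebuilds the count from the live thread list, which is what lets the loop converge once the transient thread has settled.
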