#include <linux/compat.h>
#include <linux/vmalloc.h>
-#include <asm/uaccess.h>
+#include <trace/events/fs.h>
+
+#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
return (path->mnt->mnt_flags & MNT_NOEXEC) ||
(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
+EXPORT_SYMBOL_GPL(path_noexec);
+
+/*
+ * path_nosuid - check whether set-user-ID/set-group-ID bits must be
+ * ignored when executing from @path.
+ *
+ * Returns true when the vfsmount disallows suid (mnt_may_suid() fails)
+ * or the superblock is flagged SB_I_NOSUID.
+ */
+bool path_nosuid(const struct path *path)
+{
+ return !mnt_may_suid(path->mnt) ||
+ (path->mnt->mnt_sb->s_iflags & SB_I_NOSUID);
+}
+EXPORT_SYMBOL(path_nosuid);
#ifdef CONFIG_USELIB
/*
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+ unsigned long ptr_size;
struct rlimit *rlim;
+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
+
acct_arg_size(bprm, size / PAGE_SIZE);
/*
* to work from.
*/
rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ goto fail;
}
return page;
+
+fail:
+ put_page(page);
+ return NULL;
}
static void put_arg_page(struct page *page)
if (name->name[0] != '\0')
fsnotify_open(file);
+ trace_open_exec(name->name);
+
out:
return file;
{
struct task_struct *p = current, *t;
unsigned n_fs;
+ bool fs_recheck;
if (p->ptrace) {
if (ptracer_capable(p, current_user_ns()))
if (task_no_new_privs(current))
bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
+recheck:
+ fs_recheck = false;
t = p;
n_fs = 1;
spin_lock(&p->fs->lock);
while_each_thread(p, t) {
if (t->fs == p->fs)
n_fs++;
+ if (t->flags & (PF_EXITING | PF_FORKNOEXEC))
+ fs_recheck = true;
}
rcu_read_unlock();
- if (p->fs->users > n_fs)
+ if (p->fs->users > n_fs) {
+ if (fs_recheck) {
+ spin_unlock(&p->fs->lock);
+ goto recheck;
+ }
bprm->unsafe |= LSM_UNSAFE_SHARE;
- else
+ } else
p->fs->in_exec = 1;
spin_unlock(&p->fs->lock);
}
bprm->cred->euid = current_euid();
bprm->cred->egid = current_egid();
- if (!mnt_may_suid(bprm->file->f_path.mnt))
+ if (path_nosuid(&bprm->file->f_path))
return;
if (task_no_new_privs(current))