diff --git a/kernel/fork.c b/kernel/fork.c
index ff82e24573b6d07e7ccaf37d1d765311d48a1398..6c463c80e93de8c3be3180f3cbd8694b955a1ac3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
  */
 
 #include <linux/slab.h>
+#include <linux/sched/autogroup.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/coredump.h>
+#include <linux/sched/user.h>
+#include <linux/sched/numa_balancing.h>
+#include <linux/sched/stat.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/cputime.h>
+#include <linux/rtmutex.h>
 #include <linux/init.h>
 #include <linux/unistd.h>
 #include <linux/module.h>
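
The new includes reflect the sched.h header split: helpers that used to be reachable through <linux/sched.h> alone now live in dedicated headers, matching the list added above. As a small, hedged illustration (the wrapper function below is made up for this example), task-stack accessors such as task_stack_page() are picked up from <linux/sched/task_stack.h> after the split:

#include <linux/sched.h>
#include <linux/sched/task_stack.h>	/* task_stack_page() after the split */

/* Illustrative only: return the base of a task's kernel stack.
 * Before the header split this compiled with <linux/sched.h> alone;
 * afterwards the dedicated header above is needed as well. */
static inline void *example_stack_base(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}
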
@@ -55,6 +65,7 @@
 #include <linux/rmap.h>
 #include <linux/ksm.h>
 #include <linux/acct.h>
+#include <linux/userfaultfd_k.h>
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
 #include <linux/freezer.h>
@@ -561,6 +572,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
+       LIST_HEAD(uf);
 
        uprobe_start_dup_mmap();
        if (down_write_killable(&oldmm->mmap_sem)) {
@@ -617,12 +629,13 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                if (retval)
                        goto fail_nomem_policy;
                tmp->vm_mm = mm;
+               retval = dup_userfaultfd(tmp, &uf);
+               if (retval)
+                       goto fail_nomem_anon_vma_fork;
                if (anon_vma_fork(tmp, mpnt))
                        goto fail_nomem_anon_vma_fork;
-               tmp->vm_flags &=
-                       ~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
+               tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
                tmp->vm_next = tmp->vm_prev = NULL;
-               tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file_inode(file);
@@ -678,6 +691,7 @@ out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
+       dup_userfaultfd_complete(&uf);
 fail_uprobe_end:
        uprobe_end_dup_mmap();
        return retval;
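
The dup_mmap() hunks above thread userfaultfd through fork(): for each copied VMA with a registered userfaultfd context, dup_userfaultfd() queues the context on a local list, and dup_userfaultfd_complete() informs the monitoring process (the UFFD_EVENT_FORK side of userfaultfd) only after oldmm->mmap_sem has been released. A condensed, hedged sketch of that calling pattern, not the real function; copy_one_vma() is a made-up placeholder for the VMA duplication done inline in dup_mmap():

static int dup_mmap_pattern(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	LIST_HEAD(uf);		/* userfaultfd contexts gathered during the copy */
	int retval = 0;

	down_write(&oldmm->mmap_sem);
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		tmp = copy_one_vma(mm, mpnt);	/* hypothetical helper */
		if (!tmp) {
			retval = -ENOMEM;
			break;
		}
		/* remember tmp's uffd context so the monitor can be told */
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			break;
	}
	up_write(&oldmm->mmap_sem);
	/* deliver the fork notifications only after the lock is dropped,
	 * which is also what the error paths in the real dup_mmap() do */
	dup_userfaultfd_complete(&uf);
	return retval;
}
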
@@ -996,7 +1010,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
                if (task->flags & PF_KTHREAD)
                        mm = NULL;
                else
-                       atomic_inc(&mm->mm_users);
+                       mmget(mm);
        }
        task_unlock(task);
        return mm;
@@ -1184,7 +1198,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
        vmacache_flush(tsk);
 
        if (clone_flags & CLONE_VM) {
-               atomic_inc(&oldmm->mm_users);
+               mmget(oldmm);
                mm = oldmm;
                goto good_mm;
        }
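
Both call sites that used to bump mm->mm_users by hand now go through mmget(), one of the helpers provided by the newly included <linux/sched/mm.h>. A minimal sketch of what the wrapper amounts to in this kernel, assuming the conventional mm_users/mmput pairing:

/* Sketch of the reference helper from <linux/sched/mm.h>: pins the
 * address space (page tables and VMAs) by taking an mm_users reference.
 * Every mmget() must eventually be balanced by mmput(). */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* same effect as the old open-coded increment */
}
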
@@ -1373,9 +1387,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->oom_score_adj = current->signal->oom_score_adj;
        sig->oom_score_adj_min = current->signal->oom_score_adj_min;
 
-       sig->has_child_subreaper = current->signal->has_child_subreaper ||
-                                  current->signal->is_child_subreaper;
-
        mutex_init(&sig->cred_guard_mutex);
 
        return 0;
@@ -1454,6 +1465,21 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
         task->pids[type].pid = pid;
 }
 
+static inline void rcu_copy_process(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RCU
+       p->rcu_read_lock_nesting = 0;
+       p->rcu_read_unlock_special.s = 0;
+       p->rcu_blocked_node = NULL;
+       INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+       p->rcu_tasks_holdout = false;
+       INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+       p->rcu_tasks_idle_cpu = -1;
+#endif /* #ifdef CONFIG_TASKS_RCU */
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1745,7 +1771,7 @@ static __latent_entropy struct task_struct *copy_process(
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
-       threadgroup_change_begin(current);
+       cgroup_threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted that the new process's css_set can be changed
@@ -1810,6 +1836,13 @@ static __latent_entropy struct task_struct *copy_process(
 
                        p->signal->leader_pid = pid;
                        p->signal->tty = tty_kref_get(current->signal->tty);
+                       /*
+                        * Inherit the has_child_subreaper flag under the same
+                        * tasklist_lock section that adds the child to the
+                        * process tree, for the propagate_has_child_subreaper
+                        * optimization.
+                        */
+                       p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
+                                                        p->real_parent->signal->is_child_subreaper;
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        attach_pid(p, PIDTYPE_PGID);
@@ -1835,7 +1868,7 @@ static __latent_entropy struct task_struct *copy_process(
 
        proc_fork_connector(p);
        cgroup_post_fork(p);
-       threadgroup_change_end(current);
+       cgroup_threadgroup_change_end(current);
        perf_event_fork(p);
 
        trace_task_newtask(p, clone_flags);
@@ -1846,7 +1879,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cancel_cgroup:
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
-       threadgroup_change_end(current);
+       cgroup_threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
@@ -2063,6 +2096,38 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 }
 #endif
 
+void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
+{
+       struct task_struct *leader, *parent, *child;
+       int res;
+
+       read_lock(&tasklist_lock);
+       leader = top = top->group_leader;
+down:
+       for_each_thread(leader, parent) {
+               list_for_each_entry(child, &parent->children, sibling) {
+                       res = visitor(child, data);
+                       if (res) {
+                               if (res < 0)
+                                       goto out;
+                               leader = child;
+                               goto down;
+                       }
+up:
+                       ;
+               }
+       }
+
+       if (leader != top) {
+               child = leader;
+               parent = child->real_parent;
+               leader = parent->group_leader;
+               goto up;
+       }
+out:
+       read_unlock(&tasklist_lock);
+}
+
 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
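
walk_process_tree() visits every descendant of top under tasklist_lock and hands each child to the visitor: a return of 0 moves on to the next sibling without descending, a positive value descends into that child's subtree, and a negative value aborts the walk. A hedged sketch of a visitor in the spirit of the propagate_has_child_subreaper optimization referenced in the copy_process() comment above; the function name is illustrative, and the real caller (the PR_SET_CHILD_SUBREAPER prctl path) is outside this file:

/* Illustrative visitor: flag every descendant as having a child subreaper
 * above it.  Returning 0 prunes a subtree that is already flagged, so the
 * walk only touches processes that still need the update; returning 1 asks
 * walk_process_tree() to descend into this child's own children. */
static int mark_has_child_subreaper(struct task_struct *p, void *data)
{
	if (p->signal->has_child_subreaper)
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

/* Hypothetical usage, e.g. after a process marks itself as a subreaper:
 *	walk_process_tree(current, mark_has_child_subreaper, NULL);
 */
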