diff --git a/kernel/fork.c b/kernel/fork.c
index 246bf9aaf9dfddf4632f6994ab6e2bcdd8a02434..6c463c80e93de8c3be3180f3cbd8694b955a1ac3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
  */
 
 #include <linux/slab.h>
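+/* New scheduler headers: pieces split out of the old monolithic <linux/sched.h>. */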
+#include <linux/sched/autogroup.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/coredump.h>
+#include <linux/sched/user.h>
+#include <linux/sched/numa_balancing.h>
+#include <linux/sched/stat.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/cputime.h>
+#include <linux/rtmutex.h>
 #include <linux/init.h>
 #include <linux/unistd.h>
 #include <linux/module.h>
@@ -1455,6 +1465,21 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
         task->pids[type].pid = pid;
 }
 
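+/*
+ * Reset the child's RCU state so it does not inherit the parent's
+ * preemptible-RCU read-side nesting, blocked-node bookkeeping, or
+ * Tasks-RCU holdout tracking.
+ */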
+static inline void rcu_copy_process(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RCU
+       p->rcu_read_lock_nesting = 0;
+       p->rcu_read_unlock_special.s = 0;
+       p->rcu_blocked_node = NULL;
+       INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+       p->rcu_tasks_holdout = false;
+       INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+       p->rcu_tasks_idle_cpu = -1;
+#endif /* #ifdef CONFIG_TASKS_RCU */
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1746,7 +1771,7 @@ static __latent_entropy struct task_struct *copy_process(
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
-       threadgroup_change_begin(current);
+       cgroup_threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted that the new process's css_set can be changed
@@ -1843,7 +1868,7 @@ static __latent_entropy struct task_struct *copy_process(
 
        proc_fork_connector(p);
        cgroup_post_fork(p);
-       threadgroup_change_end(current);
+       cgroup_threadgroup_change_end(current);
        perf_event_fork(p);
 
        trace_task_newtask(p, clone_flags);
@@ -1854,7 +1879,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cancel_cgroup:
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
-       threadgroup_change_end(current);
+       cgroup_threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
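
For orientation, the pattern the last three hunks converge on can be sketched as follows. This is a simplified illustration, not the real copy_process(): the name copy_process_sketch, the int return type, and the collapsed error handling are assumptions made for brevity. The begin/end, cgroup_cancel_fork() and cgroup_post_fork() calls come from the hunks above; cgroup_can_fork() does not appear in the excerpt but is the call the quoted "Ensure that the cgroup subsystem policies allow..." comment guards in the full source.

/*
 * Simplified sketch of the locking pattern: the threadgroup-change
 * critical section opened before the cgroup fork callbacks must be
 * closed on every exit path, including the bad_fork_* unwind.
 */
#include <linux/cgroup.h>
#include <linux/sched.h>

static int copy_process_sketch(struct task_struct *p)
{
	int retval;

	cgroup_threadgroup_change_begin(current);

	retval = cgroup_can_fork(p);	/* cgroup core may veto the fork */
	if (retval)
		goto bad_fork_cancel_cgroup;

	/* ... the rest of copy_process() runs here ... */

	cgroup_post_fork(p);
	cgroup_threadgroup_change_end(current);
	return 0;

bad_fork_cancel_cgroup:
	cgroup_cancel_fork(p);
	cgroup_threadgroup_change_end(current);
	return retval;
}

In the real function the end call on the error path sits under the bad_fork_free_pid label, as the last hunk shows; the sketch folds the two labels together.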