sched/headers, cgroups: Remove the threadgroup_change_*() wrappery
author    Ingo Molnar <mingo@kernel.org>    Thu, 2 Feb 2017 10:50:56 +0000 (11:50 +0100)
committer Ingo Molnar <mingo@kernel.org>    Thu, 2 Mar 2017 07:42:25 +0000 (08:42 +0100)
threadgroup_change_begin()/end() is a pointless wrapper around
cgroup_threadgroup_change_begin()/end(); the only thing it adds
is a might_sleep() check in the !CONFIG_CGROUPS case.

Remove the wrappery and move the might_sleep() into the
!CONFIG_CGROUPS stub (in the CONFIG_CGROUPS=y case the down_read()
already has a might_sleep() check).

This debloats <linux/sched.h> a bit and simplifies this API.

Update all call sites.

No change in functionality.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
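
In essence the patch removes one layer of indirection. A condensed
before/after sketch (abridged from the diff below, not verbatim source):

    /* Before: <linux/sched.h> carried a trivial wrapper: */
    static inline void threadgroup_change_begin(struct task_struct *tsk)
    {
            might_sleep();
            cgroup_threadgroup_change_begin(tsk);
    }

    /* After: call sites invoke the cgroup primitive directly, e.g.: */
    cgroup_threadgroup_change_begin(current);
    /* ... threadgroup-modifying work (fork, exec, exit) ... */
    cgroup_threadgroup_change_end(current);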
fs/exec.c
include/linux/cgroup-defs.h
include/linux/sched.h
kernel/cgroup/pids.c
kernel/fork.c
kernel/signal.c

index 698a86094f7672550340c2e4ca20cdf97e8ee0e2..e595e1529581344907dfae0ed9468c76fe0692a2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1088,7 +1088,7 @@ static int de_thread(struct task_struct *tsk)
                struct task_struct *leader = tsk->group_leader;
 
                for (;;) {
-                       threadgroup_change_begin(tsk);
+                       cgroup_threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        /*
                         * Do this under tasklist_lock to ensure that
@@ -1099,7 +1099,7 @@ static int de_thread(struct task_struct *tsk)
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
-                       threadgroup_change_end(tsk);
+                       cgroup_threadgroup_change_end(tsk);
                        schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
@@ -1157,7 +1157,7 @@ static int de_thread(struct task_struct *tsk)
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
-               threadgroup_change_end(tsk);
+               cgroup_threadgroup_change_end(tsk);
 
                release_task(leader);
        }
index 3c02404cfce9b239ab527d9748d092a4cc98cfb1..6a3f850cababb6f96130503014d83d256f2b9213 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -531,8 +531,8 @@ extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
  * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
  * @tsk: target task
  *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ * Allows cgroup operations to synchronize against threadgroup changes
+ * using a percpu_rw_semaphore.
  */
 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
 {
@@ -543,8 +543,7 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
  * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
  * @tsk: target task
  *
- * Called from threadgroup_change_end().  Counterpart of
- * cgroup_threadcgroup_change_begin().
+ * Counterpart of cgroup_threadcgroup_change_begin().
  */
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 {
@@ -555,7 +554,11 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
 
 #define CGROUP_SUBSYS_COUNT 0
 
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+       might_sleep();
+}
+
 static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
 
 #endif /* CONFIG_CGROUPS */
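
Why the explicit might_sleep() lands only in the !CONFIG_CGROUPS stub:
with CONFIG_CGROUPS=y, cgroup_threadgroup_change_begin() takes
cgroup_threadgroup_rwsem via percpu_down_read(), which already performs
the might_sleep() check, so adding another would be redundant. A
simplified sketch of the two configurations (not verbatim kernel source):

    /* CONFIG_CGROUPS=y: the sleepable lock acquisition does the check. */
    static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
    {
            percpu_down_read(&cgroup_threadgroup_rwsem); /* might_sleep() inside */
    }

    /* !CONFIG_CGROUPS: no lock is taken, so keep the check explicitly to
     * give callers the same "this can sleep" debug coverage. */
    static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
    {
            might_sleep();
    }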
index e732881517f29176f34f0a744d0ce41e80963450..3f61baac928b036c488070ebe9d39c60b8ce5cd0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3162,34 +3162,6 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-/**
- * threadgroup_change_begin - mark the beginning of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * All operations which modify a threadgroup - a new thread joining the
- * group, death of a member thread (the assertion of PF_EXITING) and
- * exec(2) dethreading the process and replacing the leader - are wrapped
- * by threadgroup_change_{begin|end}().  This is to provide a place which
- * subsystems needing threadgroup stability can hook into for
- * synchronization.
- */
-static inline void threadgroup_change_begin(struct task_struct *tsk)
-{
-       might_sleep();
-       cgroup_threadgroup_change_begin(tsk);
-}
-
-/**
- * threadgroup_change_end - mark the end of changes to a threadgroup
- * @tsk: task causing the changes
- *
- * See threadgroup_change_begin().
- */
-static inline void threadgroup_change_end(struct task_struct *tsk)
-{
-       cgroup_threadgroup_change_end(tsk);
-}
-
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 
 static inline struct thread_info *task_thread_info(struct task_struct *task)
index 2bd673783f1a9520bddd689da959217d77797ce4..e756dae493008e4bc4bf9f87846f818d7349ede8 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -214,7 +214,7 @@ static void pids_cancel_attach(struct cgroup_taskset *tset)
 
 /*
  * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
- * on threadgroup_change_begin() held by the copy_process().
+ * on cgroup_threadgroup_change_begin() held by the copy_process().
  */
 static int pids_can_fork(struct task_struct *task)
 {
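
The ordering that comment relies on is visible in the kernel/fork.c hunks
that follow: copy_process() read-holds cgroup_threadgroup_rwsem across the
cgroup fork callbacks, so by the time pids_can_fork() runs, the task's
css_set cannot change under it. An abridged control-flow sketch
(cgroup_can_fork()/cgroup_cancel_fork() are the callbacks implied by the
context lines, shown here for illustration):

    /* copy_process(), abridged: */
    cgroup_threadgroup_change_begin(current);
    retval = cgroup_can_fork(p);     /* -> pids_can_fork(): css_set is stable */
    if (retval)
            goto bad_fork_cancel_cgroup;
    /* ... remaining fork work ... */
    cgroup_post_fork(p);
    cgroup_threadgroup_change_end(current);
    /* ... */
    bad_fork_cancel_cgroup:
            cgroup_cancel_fork(p);   /* -> pids_cancel_fork() */
            cgroup_threadgroup_change_end(current);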
index 246bf9aaf9dfddf4632f6994ab6e2bcdd8a02434..d043fedc03c81977c942d8b0fbcf3404edf98a83 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1746,7 +1746,7 @@ static __latent_entropy struct task_struct *copy_process(
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
-       threadgroup_change_begin(current);
+       cgroup_threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted the the new process's css_set can be changed
@@ -1843,7 +1843,7 @@ static __latent_entropy struct task_struct *copy_process(
 
        proc_fork_connector(p);
        cgroup_post_fork(p);
-       threadgroup_change_end(current);
+       cgroup_threadgroup_change_end(current);
        perf_event_fork(p);
 
        trace_task_newtask(p, clone_flags);
@@ -1854,7 +1854,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cancel_cgroup:
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
-       threadgroup_change_end(current);
+       cgroup_threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
index 214a8feeb77124c69b976d3021044953d212fd8b..bae358532d0aee309e1a7a83b1fabd0386f6e5f7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2395,11 +2395,11 @@ void exit_signals(struct task_struct *tsk)
         * @tsk is about to have PF_EXITING set - lock out users which
         * expect stable threadgroup.
         */
-       threadgroup_change_begin(tsk);
+       cgroup_threadgroup_change_begin(tsk);
 
        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
-               threadgroup_change_end(tsk);
+               cgroup_threadgroup_change_end(tsk);
                return;
        }
 
@@ -2410,7 +2410,7 @@ void exit_signals(struct task_struct *tsk)
         */
        tsk->flags |= PF_EXITING;
 
-       threadgroup_change_end(tsk);
+       cgroup_threadgroup_change_end(tsk);
 
        if (!signal_pending(tsk))
                goto out;