UBUNTU: Ubuntu-4.15.0-96.97

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2588263a9893caa04d8854607207c41080927cc..d7d6ec85a419dbf96b29d95bf603de7918ff421b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -113,17 +113,36 @@ struct task_group;
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state)                           \
+       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
 #define __set_current_state(state_value)                       \
        do {                                                    \
+               WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_;         \
                current->state = (state_value);                 \
        } while (0)
+
 #define set_current_state(state_value)                         \
        do {                                                    \
+               WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_;         \
                smp_store_mb(current->state, (state_value));    \
        } while (0)
 
+#define set_special_state(state_value)                                 \
+       do {                                                            \
+               unsigned long flags; /* may shadow */                   \
+               WARN_ON_ONCE(!is_special_task_state(state_value));      \
+               raw_spin_lock_irqsave(&current->pi_lock, flags);        \
+               current->task_state_change = _THIS_IP_;                 \
+               current->state = (state_value);                         \
+               raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
+       } while (0)
 #else
 /*
  * set_current_state() includes a barrier so that the write of current->state
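
With CONFIG_DEBUG_ATOMIC_SLEEP enabled, the WARN_ON_ONCE() checks added in the hunk above catch state/helper mismatches at runtime. A minimal sketch of what they enforce (TASK_DEAD is one of the states matched by is_special_task_state()):

	set_current_state(TASK_UNINTERRUPTIBLE);	/* OK: normal blocking state */
	set_current_state(TASK_DEAD);			/* warns: special state via the normal helper */
	set_special_state(TASK_DEAD);			/* OK: store serialized by pi_lock */
	set_special_state(TASK_INTERRUPTIBLE);		/* warns: normal state via the special helper */
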
@@ -145,8 +164,8 @@ struct task_group;
  *
  * The above is typically ordered against the wakeup, which does:
  *
- *     need_sleep = false;
- *     wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *   need_sleep = false;
+ *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
  *
  * Where wake_up_state() (and all other wakeup primitives) imply enough
  * barriers to order the store of the variable against wakeup.
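
The sleeping side that this wakeup pairs with is the standard condition-based wait loop, sketched here with the same hypothetical need_sleep flag the comment uses:

	/* Sleeper: the condition is re-checked after every wakeup, so a
	 * TASK_UNINTERRUPTIBLE store lost to a racing TASK_RUNNING store
	 * only costs one extra trip around the loop. */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!need_sleep)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
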
@@ -155,12 +174,33 @@ struct task_group;
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
  *
  * Also see the comments of try_to_wake_up().
  */
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value)  smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value)                               \
+       current->state = (state_value)
+
+#define set_current_state(state_value)                                 \
+       smp_store_mb(current->state, (state_value))
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * cannot use the regular condition-based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
+ */
+#define set_special_state(state_value)                                 \
+       do {                                                            \
+               unsigned long flags; /* may shadow */                   \
+               raw_spin_lock_irqsave(&current->pi_lock, flags);        \
+               current->state = (state_value);                         \
+               raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
+       } while (0)
+
 #endif
 
 /* Task command name length: */
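
For illustration, the callers converted to the new helper in this series look roughly like the following (simplified from kernel/sched/core.c and kernel/signal.c; neither site has a wait loop whose condition re-check could recover a lost store):

	/* do_task_dead(), simplified: TASK_DEAD must not be clobbered by a
	 * late TASK_RUNNING store from a concurrent waker. */
	set_special_state(TASK_DEAD);

	/* do_signal_stop(), simplified: likewise for TASK_STOPPED. */
	set_special_state(TASK_STOPPED);
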
@@ -909,6 +949,8 @@ struct task_struct {
 #endif
        struct list_head                pi_state_list;
        struct futex_pi_state           *pi_state_cache;
+       struct mutex                    futex_exit_mutex;
+       unsigned int                    futex_state;
 #endif
 #ifdef CONFIG_PERF_EVENTS
        struct perf_event_context       *perf_event_ctxp[perf_nr_task_contexts];
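
These two fields replace the PF_EXITPIDONE flag removed further down: futex_state tracks how far the task has progressed through its futex exit handling, and futex_exit_mutex lets another task serialize against that. A hedged sketch of the waiter side, assuming the FUTEX_STATE_* lifecycle and exit handling from kernel/futex.c in this series:

	/* A waiter that saw the owner in FUTEX_STATE_EXITING blocks on
	 * futex_exit_mutex; once the mutex is acquired, the owner's futex
	 * exit handling is complete and the operation can be retried. */
	mutex_lock(&exiting->futex_exit_mutex);
	mutex_unlock(&exiting->futex_exit_mutex);
	put_task_struct(exiting);
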
@@ -1011,6 +1053,7 @@ struct task_struct {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int                             curr_ret_stack;
+       int                             curr_ret_depth;
 
        /* Stack of return addresses for return function tracing: */
        struct ftrace_ret_stack         *ret_stack;
@@ -1284,7 +1327,6 @@ extern struct pid *cad_pid;
  */
 #define PF_IDLE                        0x00000002      /* I am an IDLE thread */
 #define PF_EXITING             0x00000004      /* Getting shut down */
-#define PF_EXITPIDONE          0x00000008      /* PI exit done on shut down */
 #define PF_VCPU                        0x00000010      /* I'm a virtual CPU */
 #define PF_WQ_WORKER           0x00000020      /* I'm a workqueue worker */
 #define PF_FORKNOEXEC          0x00000040      /* Forked but didn't exec */
@@ -1353,7 +1395,10 @@ static inline bool is_percpu_thread(void)
 #define PFA_NO_NEW_PRIVS               0       /* May not gain new privileges. */
 #define PFA_SPREAD_PAGE                        1       /* Spread page cache over cpuset */
 #define PFA_SPREAD_SLAB                        2       /* Spread some slab caches over cpuset */
-
+#define PFA_SPEC_SSB_DISABLE           3       /* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE     4       /* Speculative Store Bypass force disabled */
+#define PFA_SPEC_IB_DISABLE            5       /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE      6       /* Indirect branch speculation permanently restricted */
 
 #define TASK_PFA_TEST(name, func)                                      \
        static inline bool task_##func(struct task_struct *p)           \
@@ -1378,6 +1423,20 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
 
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
 static inline void
 current_restore_flags(unsigned long orig_flags, unsigned long flags)
 {
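
For reference, the TASK_PFA_TEST() instance for the first new flag expands to an inline helper along these lines (body reconstructed from the TASK_PFA_* macro definitions in this file):

	static inline bool task_spec_ssb_disable(struct task_struct *p)
	{
		return test_bit(PFA_SPEC_SSB_DISABLE, &p->atomic_flags);
	}

Note that the two *_FORCE_DISABLE flags deliberately get no TASK_PFA_CLEAR() instance: once speculation is force-disabled for a task, the restriction cannot be lifted again.
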
@@ -1621,9 +1680,9 @@ static __always_inline bool need_resched(void)
 static inline unsigned int task_cpu(const struct task_struct *p)
 {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-       return p->cpu;
+       return READ_ONCE(p->cpu);
 #else
-       return task_thread_info(p)->cpu;
+       return READ_ONCE(task_thread_info(p)->cpu);
 #endif
 }
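
The READ_ONCE() pairs with a WRITE_ONCE() on the updater side, so a lockless task_cpu() caller cannot observe a torn or compiler-replayed load while the task migrates. The matching store looks roughly like this (simplified from __set_task_cpu() in kernel/sched/sched.h):

	static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
	{
	#ifdef CONFIG_THREAD_INFO_IN_TASK
		/* Published with WRITE_ONCE() so lockless readers see it atomically. */
		WRITE_ONCE(p->cpu, cpu);
	#else
		task_thread_info(p)->cpu = cpu;
	#endif
	}
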