#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
#define __set_current_state(state_value) \
do { \
+ WARN_ON_ONCE(is_special_task_state(state_value));\
current->task_state_change = _THIS_IP_; \
current->state = (state_value); \
} while (0)
+
#define set_current_state(state_value) \
do { \
+ WARN_ON_ONCE(is_special_task_state(state_value));\
current->task_state_change = _THIS_IP_; \
smp_store_mb(current->state, (state_value)); \
} while (0)
+#define set_special_state(state_value) \
+ do { \
+ unsigned long flags; /* may shadow */ \
+ WARN_ON_ONCE(!is_special_task_state(state_value)); \
+ raw_spin_lock_irqsave(&current->pi_lock, flags); \
+ current->task_state_change = _THIS_IP_; \
+ current->state = (state_value); \
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
+ } while (0)
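/*
 * A minimal sketch of what the debug checks above catch (illustrative
 * only, not kernel source):
 *
 *	set_current_state(TASK_STOPPED);	  WARNs: TASK_STOPPED is special
 *	set_special_state(TASK_UNINTERRUPTIBLE);  WARNs: not a special state
 *	set_special_state(TASK_STOPPED);	  OK: store done under ->pi_lock
 */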
#else
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
*   for (;;) {
*	set_current_state(TASK_UNINTERRUPTIBLE);
*	if (!need_sleep)
*		break;
*
*	schedule();
*   }
*   __set_current_state(TASK_RUNNING);
*
* If the caller does not need such serialisation (because, for instance, the
* condition test and condition change and wakeup are under the same lock) then
* use __set_current_state().
*
* The above is typically ordered against the wakeup, which does:
*
- *	need_sleep = false;
- *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *   need_sleep = false;
+ *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* Where wake_up_state() (and all other wakeup primitives) imply enough
* barriers to order the store of the variable against wakeup.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
*
* Also see the comments of try_to_wake_up().
*/
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value) \
+ current->state = (state_value)
+
+#define set_current_state(state_value) \
+ smp_store_mb(current->state, (state_value))
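/*
 * For illustration, the benign collision described in the comment above
 * as a timeline (sketch only; @p is the sleeping task):
 *
 *	CPU0 (sleeper)				CPU1 (waker)
 *						need_sleep = false;
 *						p->state = TASK_RUNNING;
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *	__set_current_state(TASK_RUNNING);
 *
 * The waker's TASK_RUNNING store is overwritten, but the sleeper's @cond
 * test sees need_sleep == false and breaks out without scheduling.
 */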
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * cannot use the regular condition-based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * store will not collide with our state change.
+ */
+#define set_special_state(state_value) \
+ do { \
+ unsigned long flags; /* may shadow */ \
+ raw_spin_lock_irqsave(&current->pi_lock, flags); \
+ current->state = (state_value); \
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
+ } while (0)
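/*
 * Usage sketch, modelled on do_task_dead() in kernel/sched/core.c
 * (illustrative only):
 *
 *	set_special_state(TASK_DEAD);
 *	...
 *	__schedule();	does not return
 *
 * Holding ->pi_lock across the store means a concurrent try_to_wake_up(),
 * which also takes ->pi_lock, either completes before TASK_DEAD is set or
 * observes it afterwards; no in-flight TASK_RUNNING store can intervene.
 */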
+
#endif
/* Task command name length: */
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+ /* Serializes the futex exit/exec state machine below: */
+ struct mutex futex_exit_mutex;
+ /* Futex exit state of the task (FUTEX_STATE_*): */
+ unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
+ /* Depth of the function-graph call chain being traced: */
+ int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-
+#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled */
+#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
{ return test_bit(PFA_##name, &p->atomic_flags); }
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
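/*
 * Note that the FORCE_DISABLE variants above intentionally get no
 * TASK_PFA_CLEAR(): a forced speculation restriction is a one-way latch.
 * For illustration, the generated test helper expands to:
 *
 *	static inline bool task_spec_ssb_disable(struct task_struct *p)
 *	{ return test_bit(PFA_SPEC_SSB_DISABLE, &p->atomic_flags); }
 */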
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
- return p->cpu;
+ return READ_ONCE(p->cpu);
#else
- return task_thread_info(p)->cpu;
+ return READ_ONCE(task_thread_info(p)->cpu);
#endif
}
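/*
 * The READ_ONCE() above pairs with the WRITE_ONCE() in __set_task_cpu()
 * (kernel/sched/sched.h), so a lockless reader such as task_rq_lock()
 * cannot observe a torn ->cpu value. Writer side, for illustration:
 *
 *	WRITE_ONCE(p->cpu, cpu);	THREAD_INFO_IN_TASK case
 */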