diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9dd365e4bfed77fee1b51b710c5af7e6c6370d7c..c408d0a735ed1b907f916517e15590913a56af32 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -314,6 +314,15 @@ void cpus_write_unlock(void)
 
 void lockdep_assert_cpus_held(void)
 {
+       /*
+        * We can't have hotplug operations before userspace starts running,
+        * and some init codepaths will knowingly not take the hotplug lock.
+        * This is all valid, so mute lockdep until it makes sense to report
+        * unheld locks.
+        */
+       if (system_state < SYSTEM_RUNNING)
+               return;
+
        percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
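
A note on the early return above: it relies on the numeric ordering of
enum system_states, where every boot-time state sorts below SYSTEM_RUNNING.
A minimal standalone model of the gate (assumptions: the enum mirrors the
include/linux/kernel.h layout of this kernel; the assertion body is stubbed
with a printf):

    #include <stdio.h>

    /* Assumed ordering, mirroring include/linux/kernel.h of this era. */
    enum system_states {
        SYSTEM_BOOTING,
        SYSTEM_SCHEDULING,
        SYSTEM_RUNNING,
        SYSTEM_HALT,
    };

    static enum system_states system_state = SYSTEM_BOOTING;

    static void lockdep_assert_cpus_held(void)
    {
        if (system_state < SYSTEM_RUNNING)
            return;                         /* muted during init */
        printf("assert: cpu_hotplug_lock must be held\n");
    }

    int main(void)
    {
        lockdep_assert_cpus_held();         /* silent while booting */
        system_state = SYSTEM_RUNNING;
        lockdep_assert_cpus_held();         /* now enforced */
        return 0;
    }
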
 
@@ -356,9 +365,6 @@ void __weak arch_smt_update(void) { }
 
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
 
 void __init cpu_smt_disable(bool force)
 {
@@ -370,31 +376,18 @@ void __init cpu_smt_disable(bool force)
                pr_info("SMT: Force disabled\n");
                cpu_smt_control = CPU_SMT_FORCE_DISABLED;
        } else {
+               pr_info("SMT: disabled\n");
                cpu_smt_control = CPU_SMT_DISABLED;
        }
 }
 
 /*
  * The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
-       if (!topology_smt_supported())
-               cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
  */
 void __init cpu_smt_check_topology(void)
 {
-       if (!cpu_smt_available)
+       if (!topology_smt_supported())
                cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 }
 
@@ -407,18 +400,10 @@ early_param("nosmt", smt_cmdline_disable);
 
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
-       if (topology_is_primary_thread(cpu))
+       if (cpu_smt_control == CPU_SMT_ENABLED)
                return true;
 
-       /*
-        * If the CPU is not a 'primary' thread and the booted_once bit is
-        * set then the processor has SMT support. Store this information
-        * for the late check of SMT support in cpu_smt_check_topology().
-        */
-       if (per_cpu(cpuhp_state, cpu).booted_once)
-               cpu_smt_available = true;
-
-       if (cpu_smt_control == CPU_SMT_ENABLED)
+       if (topology_is_primary_thread(cpu))
                return true;
 
        /*
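
Taken together, the two SMT hunks above simplify the flow: the deferred
cpu_smt_available bookkeeping is gone, cpu_smt_check_topology() asks
topology_smt_supported() directly, and cpu_smt_allowed() tests the common
CPU_SMT_ENABLED case first. A standalone sketch of the resulting decision
order (the topology helpers are stubbed here; the real ones live in arch
code, and the boot-once tail of the function, cut off in this hunk, is
omitted):

    #include <stdbool.h>
    #include <stdio.h>

    enum cpuhp_smt_control {
        CPU_SMT_ENABLED,
        CPU_SMT_DISABLED,
        CPU_SMT_FORCE_DISABLED,
        CPU_SMT_NOT_SUPPORTED,
    };

    static enum cpuhp_smt_control cpu_smt_control = CPU_SMT_ENABLED;

    /* Stub: arch code reports whether this CPU has SMT siblings. */
    static bool topology_smt_supported(void) { return false; }

    /* Stub: model in which even-numbered CPUs are primary threads. */
    static bool topology_is_primary_thread(unsigned int cpu)
    {
        return (cpu & 1) == 0;
    }

    static void cpu_smt_check_topology(void)
    {
        if (!topology_smt_supported())
            cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
    }

    static bool cpu_smt_allowed(unsigned int cpu)
    {
        if (cpu_smt_control == CPU_SMT_ENABLED)
            return true;                    /* common case first */
        if (topology_is_primary_thread(cpu))
            return true;                    /* primaries always boot */
        return false;
    }

    int main(void)
    {
        cpu_smt_check_topology();
        printf("cpu1: %d, cpu2: %d\n",
               cpu_smt_allowed(1), cpu_smt_allowed(2));
        return 0;
    }
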
@@ -563,6 +548,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
        }
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+       if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+               return true;
+       /*
+        * When CPU hotplug is disabled, then taking the CPU down is not
+        * possible because takedown_cpu() and the architecture and
+        * subsystem specific mechanisms are not available. So the CPU
+        * which would be completely unplugged again needs to stay around
+        * in the current state.
+        */
+       return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
 {
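
can_rollback_cpu() gates the rollback in cpuhp_up_callbacks() (next hunk):
without CONFIG_HOTPLUG_CPU there is no takedown_cpu(), so a partially
brought-up CPU may only be rolled back while it has not advanced past
CPUHP_BRINGUP_CPU. A standalone model of the guard (the state constants are
invented stand-ins; in the kernel, IS_ENABLED() folds to a compile-time 0
or 1):

    #include <stdbool.h>
    #include <stdio.h>

    /* Flip to 1 to model CONFIG_HOTPLUG_CPU=y. */
    #define CONFIG_HOTPLUG_CPU_ENABLED 0

    /* Invented stand-ins for the cpuhp state constants. */
    enum { CPUHP_OFFLINE, CPUHP_BRINGUP_CPU, CPUHP_AP_ONLINE, CPUHP_ONLINE };

    struct cpuhp_cpu_state { int state; };

    static bool can_rollback_cpu(struct cpuhp_cpu_state *st)
    {
        if (CONFIG_HOTPLUG_CPU_ENABLED)
            return true;    /* full teardown machinery exists */
        /*
         * No takedown path: only roll back if the CPU never made it
         * past the bringup step.
         */
        return st->state <= CPUHP_BRINGUP_CPU;
    }

    int main(void)
    {
        struct cpuhp_cpu_state st = { .state = CPUHP_AP_ONLINE };
        printf("%d\n", can_rollback_cpu(&st));  /* 0: CPU must stay put */
        st.state = CPUHP_BRINGUP_CPU;
        printf("%d\n", can_rollback_cpu(&st));  /* 1: safe to undo */
        return 0;
    }
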
@@ -573,8 +572,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                if (ret) {
-                       st->target = prev_state;
-                       undo_cpu_up(cpu, st);
+                       if (can_rollback_cpu(st)) {
+                               st->target = prev_state;
+                               undo_cpu_up(cpu, st);
+                       }
                        break;
                }
        }
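
The effect: on a kernel built without CONFIG_HOTPLUG_CPU, a bringup failure
beyond CPUHP_BRINGUP_CPU now leaves the CPU parked in its current state
instead of attempting a teardown the kernel cannot perform; the standalone
model after the previous hunk demonstrates the gate.
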
@@ -619,15 +620,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
        bool bringup = st->bringup;
        enum cpuhp_state state;
 
+       if (WARN_ON_ONCE(!st->should_run))
+               return;
+
        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();
 
-       if (WARN_ON_ONCE(!st->should_run))
-               return;
-
        cpuhp_lock_acquire(bringup);
 
        if (st->single) {
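
Hoisting the WARN_ON_ONCE() gives the function a cheap early-out before it
pays for the full barrier; on the continuing path, smp_mb() still orders
the ->should_run load against the later reads of the hotplug state. A
sketch of that acquire/release handshake, using C11 atomics as a stand-in
for the kernel's primitives (the flag/state split is a simplified model of
cpuhp_cpu_state, not the kernel's exact layout):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of the should_run handshake. */
    struct hp { atomic_bool should_run; int state; };

    static void publish(struct hp *hp, int state)
    {
        hp->state = state;          /* write the state first ... */
        atomic_store_explicit(&hp->should_run, true,
                              memory_order_release); /* ... then the flag */
    }

    static bool consume(struct hp *hp, int *state)
    {
        if (!atomic_load_explicit(&hp->should_run, memory_order_acquire))
            return false;           /* early-out, no state read needed */
        *state = hp->state;         /* acquire pairs with the release above */
        return true;
    }

    int main(void)
    {
        struct hp hp;
        int state;

        atomic_init(&hp.should_run, false);
        publish(&hp, 42);
        if (consume(&hp, &state))
            printf("state %d\n", state);
        return 0;
    }
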
@@ -939,7 +940,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
-                       undo_cpu_down(cpu, st);
+                       if (st->state < prev_state)
+                               undo_cpu_down(cpu, st);
                        break;
                }
        }
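
The new st->state < prev_state guard matters when the very first teardown
callback fails: nothing has been torn down yet, so there is nothing to
undo, and undo_cpu_down()'s unconditional pre-increment would leave
st->state pointing one state too high. A standalone model of that
corruption (the loop shape of undo_cpu_down() is assumed from this
kernel's rollback helpers):

    #include <stdio.h>

    struct st { int state; int target; };

    /* Models undo_cpu_down(): pre-increment, then re-run bringups. */
    static void undo_down(struct st *st)
    {
        for (st->state++; st->state <= st->target; st->state++)
            printf("re-bringup state %d\n", st->state);
    }

    int main(void)
    {
        /* First teardown failed: state is still at target/prev_state. */
        struct st st = { .state = 4, .target = 4 };

        undo_down(&st);             /* unconditional undo ... */
        printf("state now %d (should still be 4)\n", st.state);

        st.state = 4;               /* with the guard, the call is skipped */
        if (st.state < st.target)
            undo_down(&st);
        printf("state now %d\n", st.state);
        return 0;
    }
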
@@ -992,7 +994,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
-       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+       if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }
@@ -1943,6 +1945,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
        if (ret)
                return ret;
 
+       if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
+               return -EINVAL;
+
        /*
         * Cannot fail STARTING/DYING callbacks.
         */
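
The added range check closes an out-of-bounds access: the value written to
the per-CPU hotplug "fail" attribute in sysfs is later used as an index
into the cpuhp state tables, so anything outside CPUHP_OFFLINE..CPUHP_ONLINE
must be rejected with -EINVAL. A tiny standalone model of the failure mode
(table size and names are invented for illustration):

    #include <stdio.h>

    enum { CPUHP_OFFLINE = 0, CPUHP_ONLINE = 3 };

    static const char *state_names[CPUHP_ONLINE + 1] = {
        "offline", "bringup", "ap-online", "online",
    };

    static const char *lookup(int fail)
    {
        /* Without this check, fail = -1 or 4 reads past state_names[]. */
        if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
            return NULL;            /* -EINVAL in the kernel */
        return state_names[fail];
    }

    int main(void)
    {
        printf("%s\n", lookup(2));                       /* ap-online */
        printf("%s\n", lookup(42) ? "?" : "rejected");   /* rejected */
        return 0;
    }
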
@@ -2053,7 +2058,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
 }
 
-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 {
        int cpu, ret = 0;
 
@@ -2087,7 +2092,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
        return ret;
 }
 
-static int cpuhp_smt_enable(void)
+int cpuhp_smt_enable(void)
 {
        int cpu, ret = 0;
 
@@ -2297,7 +2302,18 @@ void __init boot_cpu_hotplug_init(void)
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
 }
 
-enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+       CPU_MITIGATIONS_OFF,
+       CPU_MITIGATIONS_AUTO,
+       CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+static enum cpu_mitigations cpu_mitigations __ro_after_init =
+       CPU_MITIGATIONS_AUTO;
 
 static int __init mitigations_parse_cmdline(char *arg)
 {
@@ -2307,7 +2323,24 @@ static int __init mitigations_parse_cmdline(char *arg)
                cpu_mitigations = CPU_MITIGATIONS_AUTO;
        else if (!strcmp(arg, "auto,nosmt"))
                cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+       else
+               pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+                       arg);
 
        return 0;
 }
 early_param("mitigations", mitigations_parse_cmdline);
+
+/* mitigations=off */
+bool cpu_mitigations_off(void)
+{
+       return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_off);
+
+/* mitigations=auto,nosmt */
+bool cpu_mitigations_auto_nosmt(void)
+{
+       return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
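
On the kernel command line these surface as mitigations=off,
mitigations=auto, and mitigations=auto,nosmt. A hypothetical caller sketch
(arch_select_mitigation() and the stub bodies are invented for
illustration; in the kernel the two exported helpers are the real query
points):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the helpers exported above. */
    static bool cpu_mitigations_off(void)        { return false; }
    static bool cpu_mitigations_auto_nosmt(void) { return true; }

    /* Hypothetical arch-side selector, invented for illustration. */
    static void arch_select_mitigation(void)
    {
        if (cpu_mitigations_off()) {
            printf("mitigations=off: leave the CPU unmitigated\n");
            return;
        }
        printf("apply the default mitigation\n");
        if (cpu_mitigations_auto_nosmt())
            printf("auto,nosmt: additionally disable SMT\n");
    }

    int main(void)
    {
        arch_select_mitigation();
        return 0;
    }
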