X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=kernel%2Fcpu.c;h=c408d0a735ed1b907f916517e15590913a56af32;hb=96573ebb6848afc59697409a8c80cef27ac0f995;hp=384afdfec59890095430ee885228e1baf541f9b8;hpb=3762ba906e57545b5baa04e91a2b427705f46cca;p=mirror_ubuntu-bionic-kernel.git

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 384afdfec598..c408d0a735ed 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,6 +10,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/hotplug.h>
 #include <linux/sched/task.h>
+#include <linux/sched/smt.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
@@ -313,6 +314,15 @@ void cpus_write_unlock(void)
 
 void lockdep_assert_cpus_held(void)
 {
+	/*
+	 * We can't have hotplug operations before userspace starts running,
+	 * and some init codepaths will knowingly not take the hotplug lock.
+	 * This is all valid, so mute lockdep until it makes sense to report
+	 * unheld locks.
+	 */
+	if (system_state < SYSTEM_RUNNING)
+		return;
+
 	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
 
@@ -347,16 +357,43 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 
-static int __init smt_cmdline_disable(char *str)
+void __init cpu_smt_disable(bool force)
 {
-	cpu_smt_control = CPU_SMT_DISABLED;
-	if (str && !strcmp(str, "force")) {
+	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		return;
+
+	if (force) {
 		pr_info("SMT: Force disabled\n");
 		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+	} else {
+		pr_info("SMT: disabled\n");
+		cpu_smt_control = CPU_SMT_DISABLED;
 	}
+}
+
+/*
+ * The decision whether SMT is supported can only be done after the full
+ * CPU identification. Called from architecture code.
+ */
+void __init cpu_smt_check_topology(void)
+{
+	if (!topology_smt_supported())
+		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+static int __init smt_cmdline_disable(char *str)
+{
+	cpu_smt_disable(str && !strcmp(str, "force"));
 	return 0;
 }
 early_param("nosmt", smt_cmdline_disable);
@@ -511,6 +548,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 	}
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+		return true;
+	/*
+	 * When CPU hotplug is disabled, then taking the CPU down is not
+	 * possible because takedown_cpu() and the architecture and
+	 * subsystem specific mechanisms are not available. So the CPU
+	 * which would be completely unplugged again needs to stay around
+	 * in the current state.
+	 */
+	return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 			      enum cpuhp_state target)
 {
@@ -521,8 +572,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 		st->state++;
 		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 		if (ret) {
-			st->target = prev_state;
-			undo_cpu_up(cpu, st);
+			if (can_rollback_cpu(st)) {
+				st->target = prev_state;
+				undo_cpu_up(cpu, st);
+			}
 			break;
 		}
 	}
@@ -567,15 +620,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
 	bool bringup = st->bringup;
 	enum cpuhp_state state;
 
+	if (WARN_ON_ONCE(!st->should_run))
+		return;
+
 	/*
 	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
 	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();
 
-	if (WARN_ON_ONCE(!st->should_run))
-		return;
-
 	cpuhp_lock_acquire(bringup);
 
 	if (st->single) {
@@ -887,7 +940,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 		if (ret) {
 			st->target = prev_state;
-			undo_cpu_down(cpu, st);
+			if (st->state < prev_state)
+				undo_cpu_down(cpu, st);
 			break;
 		}
 	}
@@ -940,7 +994,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 	 * to do the further cleanups.
 	 */
 	ret = cpuhp_down_callbacks(cpu, st, target);
-	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
 		cpuhp_reset_state(st, prev_state);
 		__cpuhp_kick_ap(st);
 	}
@@ -952,6 +1006,7 @@ out:
 	 * concurrent CPU hotplug via cpu_add_remove_lock.
 	 */
 	lockup_detector_cleanup();
+	arch_smt_update();
 	return ret;
 }
 
@@ -1080,6 +1135,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 	ret = cpuhp_up_callbacks(cpu, st, target);
 out:
 	cpus_write_unlock();
+	arch_smt_update();
 	return ret;
 }
 
@@ -1889,6 +1945,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
 	if (ret)
 		return ret;
 
+	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
+		return -EINVAL;
+
 	/*
 	 * Cannot fail STARTING/DYING callbacks.
 	 */
@@ -1990,7 +2049,16 @@ static void cpuhp_offline_cpu_device(unsigned int cpu)
 	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
 }
 
-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+	struct device *dev = get_cpu_device(cpu);
+
+	dev->offline = false;
+	/* Tell user space about the state change */
+	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 {
 	int cpu, ret = 0;
 
@@ -2016,17 +2084,33 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 		 */
 		cpuhp_offline_cpu_device(cpu);
 	}
-	if (!ret)
+	if (!ret) {
 		cpu_smt_control = ctrlval;
+		arch_smt_update();
+	}
 	cpu_maps_update_done();
 	return ret;
 }
 
-static void cpuhp_smt_enable(void)
+int cpuhp_smt_enable(void)
 {
+	int cpu, ret = 0;
+
 	cpu_maps_update_begin();
 	cpu_smt_control = CPU_SMT_ENABLED;
+	arch_smt_update();
+	for_each_present_cpu(cpu) {
+		/* Skip online CPUs and CPUs on offline nodes */
+		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+			continue;
+		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+		if (ret)
+			break;
+		/* See comment in cpuhp_smt_disable() */
+		cpuhp_online_cpu_device(cpu);
+	}
 	cpu_maps_update_done();
+	return ret;
 }
 
 static ssize_t
@@ -2057,7 +2141,7 @@ store_smt_control(struct device *dev, struct device_attribute *attr,
 	if (ctrlval != cpu_smt_control) {
 		switch (ctrlval) {
 		case CPU_SMT_ENABLED:
-			cpuhp_smt_enable();
+			ret = cpuhp_smt_enable();
 			break;
 		case CPU_SMT_DISABLED:
 		case CPU_SMT_FORCE_DISABLED:
@@ -2094,9 +2178,6 @@ static const struct attribute_group cpuhp_smt_attr_group = {
 
 static int __init cpu_smt_state_init(void)
 {
-	if (!topology_smt_supported())
-		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-
 	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
 				  &cpuhp_smt_attr_group);
 }
@@ -2213,8 +2294,53 @@ void __init boot_cpu_init(void)
 /*
  * Must be called _AFTER_ setting up the per_cpu areas
  */
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
+{
+#ifdef CONFIG_SMP
+	per_cpu_ptr(&cpuhp_state, smp_processor_id())->booted_once = true;
+#endif
+	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+}
+
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+	CPU_MITIGATIONS_OFF,
+	CPU_MITIGATIONS_AUTO,
+	CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+static enum cpu_mitigations cpu_mitigations __ro_after_init =
+	CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+	if (!strcmp(arg, "off"))
+		cpu_mitigations = CPU_MITIGATIONS_OFF;
+	else if (!strcmp(arg, "auto"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO;
+	else if (!strcmp(arg, "auto,nosmt"))
+		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+	else
+		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+			arg);
+
+	return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
+
+/* mitigations=off */
+bool cpu_mitigations_off(void)
+{
+	return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_off);
+
+/* mitigations=auto,nosmt */
+bool cpu_mitigations_auto_nosmt(void)
 {
-	this_cpu_write(cpuhp_state.booted_once, true);
-	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
+	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
 }
+EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
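
The __weak arch_smt_update() stub added above is called from _cpu_up(), _cpu_down(), cpuhp_smt_disable() and cpuhp_smt_enable(), so an architecture can re-evaluate SMT-dependent mitigation state after every topology change. As a rough sketch only (the real x86 override lives in arch/x86/kernel/cpu/bugs.c and re-selects speculation mitigation modes; this also assumes the backport carries sched_smt_active() from <linux/sched/smt.h>), an override could look like:

#include <linux/printk.h>
#include <linux/sched/smt.h>

void arch_smt_update(void)
{
	/*
	 * All callers hold cpu_add_remove_lock, so the SMT state
	 * observed here is stable for the duration of the call.
	 */
	pr_info("SMT is now %s\n",
		sched_smt_active() ? "active" : "inactive");
}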
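
can_rollback_cpu() matters because cpuhp_up_callbacks() unwinds a partially brought-up CPU through undo_cpu_up() when a state callback fails, and that unwinding is impossible with CONFIG_HOTPLUG_CPU=n, where the CPU is instead left parked in its current state. A minimal sketch of a callback pair that exercises this machinery (the "example" names are invented for illustration; cpuhp_setup_state() and CPUHP_AP_ONLINE_DYN are the real API):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/printk.h>

static int example_online(unsigned int cpu)
{
	pr_info("example: cpu%u coming online\n", cpu);
	return 0;	/* a negative return here triggers rollback */
}

static int example_offline(unsigned int cpu)
{
	pr_info("example: cpu%u going offline\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	/*
	 * Register a dynamic ONLINE-section state: the startup callback
	 * runs whenever a CPU comes up, the teardown callback whenever
	 * one goes down. A startup failure is exactly what the new
	 * can_rollback_cpu() check handles, and the write_cpuhp_fail()
	 * bounds check above keeps fault injection within valid states.
	 */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				 example_online, example_offline);
}
late_initcall(example_init);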
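
The new cpu_mitigations_off() and cpu_mitigations_auto_nosmt() helpers are meant to be queried from architecture mitigation-selection code. A hedged sketch of such a consumer, where the "foo" mitigation and enable_foo_mitigation() are hypothetical placeholders (on x86 the analogous checks live in arch/x86/kernel/cpu/bugs.c):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical stand-in for a real mitigation enable path. */
static void __init enable_foo_mitigation(void)
{
	pr_info("foo: mitigation enabled\n");
}

static void __init foo_select_mitigation(void)
{
	/* "mitigations=off" globally disables optional mitigations. */
	if (cpu_mitigations_off())
		return;

	enable_foo_mitigation();

	/*
	 * "mitigations=auto,nosmt" additionally asks for SMT to be shut
	 * off when the vulnerability is aggravated by SMT;
	 * cpu_smt_disable(false) marks SMT soft-disabled rather than
	 * force-disabled, so it can be re-enabled via sysfs later.
	 */
	if (cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}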