diff --git a/kernel/cpu.c b/kernel/cpu.c
index 53f7dc65f9a3b917c1c347834257cf26521eff95..f760063fb1647b73ccd91635d2b9dc3c1fb9578f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -60,6 +60,7 @@ struct cpuhp_cpu_state {
        bool                    rollback;
        bool                    single;
        bool                    bringup;
+       bool                    booted_once;
        struct hlist_node       *node;
        struct hlist_node       *last;
        enum cpuhp_state        cb_state;
@@ -346,6 +347,85 @@ void cpu_hotplug_enable(void)
 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif /* CONFIG_HOTPLUG_CPU */
 
+#ifdef CONFIG_HOTPLUG_SMT
+enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+static bool cpu_smt_available __read_mostly;
+
+void __init cpu_smt_disable(bool force)
+{
+       if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+               cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+               return;
+
+       if (force) {
+               pr_info("SMT: Force disabled\n");
+               cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+       } else {
+               cpu_smt_control = CPU_SMT_DISABLED;
+       }
+}
+
+/*
+ * The decision whether SMT is supported can only be made after the full
+ * CPU identification. Called from architecture code before non-boot CPUs
+ * are brought up.
+ */
+void __init cpu_smt_check_topology_early(void)
+{
+       if (!topology_smt_supported())
+               cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+/*
+ * If SMT was disabled by BIOS, detect it here, after the CPUs have been
+ * brought online. This ensures the smt/l1tf sysfs entries are consistent
+ * with reality. cpu_smt_available is set to true during the bringup of
+ * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
+ * cpu_smt_control's previous setting.
+ */
+void __init cpu_smt_check_topology(void)
+{
+       if (!cpu_smt_available)
+               cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+}
+
+static int __init smt_cmdline_disable(char *str)
+{
+       cpu_smt_disable(str && !strcmp(str, "force"));
+       return 0;
+}
+early_param("nosmt", smt_cmdline_disable);
+
+static inline bool cpu_smt_allowed(unsigned int cpu)
+{
+       if (topology_is_primary_thread(cpu))
+               return true;
+
+       /*
+        * If the CPU is not a 'primary' thread and the booted_once bit is
+        * set then the processor has SMT support. Store this information
+        * for the late check of SMT support in cpu_smt_check_topology().
+        */
+       if (per_cpu(cpuhp_state, cpu).booted_once)
+               cpu_smt_available = true;
+
+       if (cpu_smt_control == CPU_SMT_ENABLED)
+               return true;
+
+       /*
+        * On x86 it's required to boot all logical CPUs at least once so
+        * that the init code can get a chance to set CR4.MCE on each
+        * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
+        * core will shut down the machine.
+        */
+       return !per_cpu(cpuhp_state, cpu).booted_once;
+}
+#else
+static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+#endif
+
 static inline enum cpuhp_state
 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 {
@@ -426,6 +506,16 @@ static int bringup_wait_for_ap(unsigned int cpu)
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);
 
+       /*
+        * SMT soft disabling on X86 requires to bring the CPU out of the
+        * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
+        * CPU marked itself as booted_once in notify_cpu_starting() so the
+        * cpu_smt_allowed() check will now return false if this is not the
+        * primary sibling.
+        */
+       if (!cpu_smt_allowed(cpu))
+               return -ECANCELED;
+
        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;
 
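
The -ECANCELED return above is where the SMT policy bites during bringup: notify_cpu_starting() has already set booted_once on the AP, so a non-primary sibling gets exactly one trip through startup (enough to set CR4.MCE) and is refused afterwards. Below is a stand-alone user-space model of the cpu_smt_allowed() decision, with hypothetical names, to make the truth table easy to eyeball; the real function additionally latches cpu_smt_available as a side effect.

    /* Hypothetical model of cpu_smt_allowed(); compile with any C compiler. */
    #include <stdbool.h>
    #include <stdio.h>

    enum smt_ctrl { SMT_ENABLED, SMT_DISABLED };

    static bool smt_allowed(bool primary, bool booted_once, enum smt_ctrl ctrl)
    {
            if (primary)
                    return true;        /* primary threads are never gated */
            if (ctrl == SMT_ENABLED)
                    return true;        /* SMT on: siblings may come up */
            return !booted_once;        /* SMT off: one bringup for CR4.MCE */
    }

    int main(void)
    {
            for (int p = 0; p <= 1; p++)
                    for (int b = 0; b <= 1; b++)
                            for (int c = 0; c <= 1; c++)
                                    printf("primary=%d booted_once=%d ctrl=%-8s -> %s\n",
                                           p, b, c == SMT_ENABLED ? "enabled" : "disabled",
                                           smt_allowed(p, b, c) ? "allow" : "deny");
            return 0;
    }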
@@ -758,7 +848,6 @@ static int takedown_cpu(unsigned int cpu)
 
        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
-       smpboot_park_threads(cpu);
 
        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
@@ -911,20 +1000,19 @@ out:
        return ret;
 }
 
+static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+{
+       if (cpu_hotplug_disabled)
+               return -EBUSY;
+       return _cpu_down(cpu, 0, target);
+}
+
 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
 {
        int err;
 
        cpu_maps_update_begin();
-
-       if (cpu_hotplug_disabled) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       err = _cpu_down(cpu, 0, target);
-
-out:
+       err = cpu_down_maps_locked(cpu, target);
        cpu_maps_update_done();
        return err;
 }
@@ -953,6 +1041,7 @@ void notify_cpu_starting(unsigned int cpu)
        int ret;
 
        rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
+       st->booted_once = true;
        while (st->state < target) {
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
@@ -1062,6 +1151,10 @@ static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
                err = -EBUSY;
                goto out;
        }
+       if (!cpu_smt_allowed(cpu)) {
+               err = -EPERM;
+               goto out;
+       }
 
        err = _cpu_up(cpu, 0, target);
 out:
@@ -1344,7 +1437,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name                   = "smpboot/threads:online",
                .startup.single         = smpboot_unpark_threads,
-               .teardown.single        = NULL,
+               .teardown.single        = smpboot_park_threads,
        },
        [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
                .name                   = "irq/affinity:online",
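
With this change, parking the smpboot threads becomes the teardown half of CPUHP_AP_SMPBOOT_THREADS rather than an open-coded call in takedown_cpu() (removed in the hunk further up), so the state machine runs park at exactly the inverse point of unpark. The same symmetric startup/teardown pairing is what the cpuhp API offers any subsystem; a minimal hypothetical registration sketch follows (the demo_* names are invented):

    /* Hypothetical cpuhp client: symmetric online/offline callbacks. */
    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int demo_cpu_online(unsigned int cpu)
    {
            /* start or unpark per-CPU resources here */
            return 0;
    }

    static int demo_cpu_offline(unsigned int cpu)
    {
            /* stop or park them again; the inverse of demo_cpu_online() */
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                        demo_cpu_online, demo_cpu_offline);

            return ret < 0 ? ret : 0;   /* DYN returns the allocated state */
    }
    device_initcall(demo_init);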
@@ -1918,10 +2011,171 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
        NULL
 };
 
+#ifdef CONFIG_HOTPLUG_SMT
+
+static const char *smt_states[] = {
+       [CPU_SMT_ENABLED]               = "on",
+       [CPU_SMT_DISABLED]              = "off",
+       [CPU_SMT_FORCE_DISABLED]        = "forceoff",
+       [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
+}
+
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = true;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = false;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       for_each_online_cpu(cpu) {
+               if (topology_is_primary_thread(cpu))
+                       continue;
+               ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+               if (ret)
+                       break;
+               /*
+                * As this needs to hold the cpu maps lock it's impossible
+                * to call device_offline() because that ends up calling
+                * cpu_down() which takes the cpu maps lock. That lock
+                * needs to be held as this might race against in-kernel
+                * abusers of the hotplug machinery (thermal management).
+                *
+                * So nothing would update device:offline state. That would
+                * leave the sysfs entry stale and prevent onlining after
+                * smt control has been changed to 'off' again. This is
+                * called under the sysfs hotplug lock, so it is properly
+                * serialized against the regular offline usage.
+                */
+               cpuhp_offline_cpu_device(cpu);
+       }
+       if (!ret)
+               cpu_smt_control = ctrlval;
+       cpu_maps_update_done();
+       return ret;
+}
+
+static int cpuhp_smt_enable(void)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       cpu_smt_control = CPU_SMT_ENABLED;
+       for_each_present_cpu(cpu) {
+               /* Skip online CPUs and CPUs on offline nodes */
+               if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+                       continue;
+               ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+               if (ret)
+                       break;
+               /* See comment in cpuhp_smt_disable() */
+               cpuhp_online_cpu_device(cpu);
+       }
+       cpu_maps_update_done();
+       return ret;
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+                 const char *buf, size_t count)
+{
+       int ctrlval, ret;
+
+       if (sysfs_streq(buf, "on"))
+               ctrlval = CPU_SMT_ENABLED;
+       else if (sysfs_streq(buf, "off"))
+               ctrlval = CPU_SMT_DISABLED;
+       else if (sysfs_streq(buf, "forceoff"))
+               ctrlval = CPU_SMT_FORCE_DISABLED;
+       else
+               return -EINVAL;
+
+       if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
+               return -EPERM;
+
+       if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+               return -ENODEV;
+
+       ret = lock_device_hotplug_sysfs();
+       if (ret)
+               return ret;
+
+       if (ctrlval != cpu_smt_control) {
+               switch (ctrlval) {
+               case CPU_SMT_ENABLED:
+                       ret = cpuhp_smt_enable();
+                       break;
+               case CPU_SMT_DISABLED:
+               case CPU_SMT_FORCE_DISABLED:
+                       ret = cpuhp_smt_disable(ctrlval);
+                       break;
+               }
+       }
+
+       unlock_device_hotplug();
+       return ret ? ret : count;
+}
+static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+
+static ssize_t
+show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       bool active = topology_max_smt_threads() > 1;
+
+       return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+}
+static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+
+static struct attribute *cpuhp_smt_attrs[] = {
+       &dev_attr_control.attr,
+       &dev_attr_active.attr,
+       NULL
+};
+
+static const struct attribute_group cpuhp_smt_attr_group = {
+       .attrs = cpuhp_smt_attrs,
+       .name = "smt",
+};
+
+static int __init cpu_smt_state_init(void)
+{
+       return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+                                 &cpuhp_smt_attr_group);
+}
+
+#else
+static inline int cpu_smt_state_init(void) { return 0; }
+#endif
+
 static int __init cpuhp_sysfs_init(void)
 {
        int cpu, ret;
 
+       ret = cpu_smt_state_init();
+       if (ret)
+               return ret;
+
        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
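
Given the group name "smt" on the CPU subsystem root, the two attributes surface as /sys/devices/system/cpu/smt/control (writable: "on", "off", "forceoff"; reads can also show "notsupported") and /sys/devices/system/cpu/smt/active. A small user-space sketch exercising them; the paths follow from the attribute definitions above, and the write needs root:

    /* User-space sketch: read and set the SMT control files added above. */
    #include <stdio.h>

    #define SMT_DIR "/sys/devices/system/cpu/smt"

    static void show(const char *path)
    {
            char buf[32];
            FILE *f = fopen(path, "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);    /* value ends in '\n' */
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            FILE *f;

            show(SMT_DIR "/control");       /* on/off/forceoff/notsupported */
            show(SMT_DIR "/active");        /* 1 while SMT siblings exist */

            /* Offline every non-primary sibling, as cpuhp_smt_disable() does: */
            f = fopen(SMT_DIR "/control", "w");
            if (f) {
                    fputs("off", f);
                    fclose(f);
            }
            return 0;
    }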
@@ -2024,5 +2279,6 @@ void __init boot_cpu_init(void)
  */
 void __init boot_cpu_state_init(void)
 {
+       per_cpu_ptr(&cpuhp_state, smp_processor_id())->booted_once = true;
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
 }