cpu/hotplug: Provide knobs to control SMT
diff --git a/kernel/cpu.c b/kernel/cpu.c
index da7bd87f7bd3f6b4816ace5e3ae7c1134fa961b2..8ccec33d0f3876f8196f8255a73087d97e0ef936 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -937,6 +937,29 @@ EXPORT_SYMBOL(cpu_down);
 #define takedown_cpu           NULL
 #endif /*CONFIG_HOTPLUG_CPU*/
 
+#ifdef CONFIG_HOTPLUG_SMT
+enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+
+static int __init smt_cmdline_disable(char *str)
+{
+       cpu_smt_control = CPU_SMT_DISABLED;
+       if (str && !strcmp(str, "force")) {
+               pr_info("SMT: Force disabled\n");
+               cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+       }
+       return 0;
+}
+early_param("nosmt", smt_cmdline_disable);
+
+static inline bool cpu_smt_allowed(unsigned int cpu)
+{
+       return cpu_smt_control == CPU_SMT_ENABLED ||
+               topology_is_primary_thread(cpu);
+}
+#else
+static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+#endif
+
 /**
  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
  * @cpu: cpu that just started
@@ -1060,6 +1083,10 @@ static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
                err = -EBUSY;
                goto out;
        }
+       if (!cpu_smt_allowed(cpu)) {
+               err = -EPERM;
+               goto out;
+       }
 
        err = _cpu_up(cpu, 0, target);
 out:
@@ -1916,10 +1943,153 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
        NULL
 };
 
+#ifdef CONFIG_HOTPLUG_SMT
+
+static const char *smt_states[] = {
+       [CPU_SMT_ENABLED]               = "on",
+       [CPU_SMT_DISABLED]              = "off",
+       [CPU_SMT_FORCE_DISABLED]        = "forceoff",
+       [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
+}
+
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+       struct device *dev = get_cpu_device(cpu);
+
+       dev->offline = true;
+       /* Tell user space about the state change */
+       kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+       int cpu, ret = 0;
+
+       cpu_maps_update_begin();
+       for_each_online_cpu(cpu) {
+               if (topology_is_primary_thread(cpu))
+                       continue;
+               ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+               if (ret)
+                       break;
+               /*
+                * As this needs to hold the cpu maps lock it's impossible
+                * to call device_offline() because that ends up calling
+                * cpu_down() which takes cpu maps lock. cpu maps lock
+                * needs to be held as this might race against in kernel
+                * abusers of the hotplug machinery (thermal management).
+                *
+                * So nothing would update device:offline state. That would
+                * leave the sysfs entry stale and prevent onlining after
+                * smt control has been changed to 'off' again. This is
+                * called under the sysfs hotplug lock, so it is properly
+                * serialized against the regular offline usage.
+                */
+               cpuhp_offline_cpu_device(cpu);
+       }
+       if (!ret)
+               cpu_smt_control = ctrlval;
+       cpu_maps_update_done();
+       return ret;
+}
+
+static void cpuhp_smt_enable(void)
+{
+       cpu_maps_update_begin();
+       cpu_smt_control = CPU_SMT_ENABLED;
+       cpu_maps_update_done();
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+                 const char *buf, size_t count)
+{
+       int ctrlval, ret;
+
+       if (sysfs_streq(buf, "on"))
+               ctrlval = CPU_SMT_ENABLED;
+       else if (sysfs_streq(buf, "off"))
+               ctrlval = CPU_SMT_DISABLED;
+       else if (sysfs_streq(buf, "forceoff"))
+               ctrlval = CPU_SMT_FORCE_DISABLED;
+       else
+               return -EINVAL;
+
+       if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
+               return -EPERM;
+
+       if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+               return -ENODEV;
+
+       ret = lock_device_hotplug_sysfs();
+       if (ret)
+               return ret;
+
+       if (ctrlval != cpu_smt_control) {
+               switch (ctrlval) {
+               case CPU_SMT_ENABLED:
+                       cpuhp_smt_enable();
+                       break;
+               case CPU_SMT_DISABLED:
+               case CPU_SMT_FORCE_DISABLED:
+                       ret = cpuhp_smt_disable(ctrlval);
+                       break;
+               }
+       }
+
+       unlock_device_hotplug();
+       return ret ? ret : count;
+}
+static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+
+static ssize_t
+show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       bool active = topology_max_smt_threads() > 1;
+
+       return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+}
+static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+
+static struct attribute *cpuhp_smt_attrs[] = {
+       &dev_attr_control.attr,
+       &dev_attr_active.attr,
+       NULL
+};
+
+static const struct attribute_group cpuhp_smt_attr_group = {
+       .attrs = cpuhp_smt_attrs,
+       .name = "smt",
+       NULL
+};
+
+static int __init cpu_smt_state_init(void)
+{
+       if (!topology_smt_supported())
+               cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+
+       return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+                                 &cpuhp_smt_attr_group);
+}
+
+#else
+static inline int cpu_smt_state_init(void) { return 0; }
+#endif
+
 static int __init cpuhp_sysfs_init(void)
 {
        int cpu, ret;
 
+       ret = cpu_smt_state_init();
+       if (ret)
+               return ret;
+
        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
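
The knobs added by this patch are registered on cpu_subsys.dev_root with the group name "smt", so they show up as /sys/devices/system/cpu/smt/control and /sys/devices/system/cpu/smt/active. Below is a minimal user-space sketch, not part of the patch, that reads both files; the helper name read_attr is made up for illustration, and writing "off"/"forceoff" to the control file requires root.

/* sketch: query the SMT control/active state exposed by this patch */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f))
		buf[0] = '\0';
	fclose(f);
	/* strip the trailing newline written by the show functions */
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char state[32], active[8];

	if (read_attr("/sys/devices/system/cpu/smt/control", state, sizeof(state)) ||
	    read_attr("/sys/devices/system/cpu/smt/active", active, sizeof(active))) {
		perror("smt sysfs");
		return 1;
	}
	/* control is one of: on, off, forceoff, notsupported */
	printf("SMT control: %s, active: %s\n", state, active);

	/*
	 * To offline all secondary threads (as cpuhp_smt_disable() does),
	 * a privileged user would write "off" to the control file, e.g.:
	 *   echo off > /sys/devices/system/cpu/smt/control
	 */
	return 0;
}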