]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/base/cpu.c
ACPI / processor: Use common hotplug infrastructure
[mirror_ubuntu-bionic-kernel.git] / drivers / base / cpu.c
1 /*
2 * CPU subsystem support
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/cpu.h>
10 #include <linux/topology.h>
11 #include <linux/device.h>
12 #include <linux/node.h>
13 #include <linux/gfp.h>
14 #include <linux/slab.h>
15 #include <linux/percpu.h>
16 #include <linux/acpi.h>
17
18 #include "base.h"
19
/* Per-CPU cache of the registered sysfs device for that CPU (NULL if none). */
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
21
/*
 * Bus match callback for the cpu subsystem: ACPI-style matching is the
 * only mechanism that may bind a driver to a cpu device.
 */
static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
	return acpi_driver_match_device(dev, drv) ? 1 : 0;
}
30
31 #ifdef CONFIG_HOTPLUG_CPU
32 static void change_cpu_under_node(struct cpu *cpu,
33 unsigned int from_nid, unsigned int to_nid)
34 {
35 int cpuid = cpu->dev.id;
36 unregister_cpu_under_node(cpuid, from_nid);
37 register_cpu_under_node(cpuid, to_nid);
38 cpu->node_id = to_nid;
39 }
40
41 static int __ref cpu_subsys_online(struct device *dev)
42 {
43 struct cpu *cpu = container_of(dev, struct cpu, dev);
44 int cpuid = dev->id;
45 int from_nid, to_nid;
46 int ret;
47
48 cpu_hotplug_driver_lock();
49
50 from_nid = cpu_to_node(cpuid);
51 ret = cpu_up(cpuid);
52 /*
53 * When hot adding memory to memoryless node and enabling a cpu
54 * on the node, node number of the cpu may internally change.
55 */
56 to_nid = cpu_to_node(cpuid);
57 if (from_nid != to_nid)
58 change_cpu_under_node(cpu, from_nid, to_nid);
59
60 cpu_hotplug_driver_unlock();
61 return ret;
62 }
63
64 static int cpu_subsys_offline(struct device *dev)
65 {
66 int ret;
67
68 cpu_hotplug_driver_lock();
69 ret = cpu_down(dev->id);
70 cpu_hotplug_driver_unlock();
71 return ret;
72 }
73
74 void unregister_cpu(struct cpu *cpu)
75 {
76 int logical_cpu = cpu->dev.id;
77
78 unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
79
80 device_unregister(&cpu->dev);
81 per_cpu(cpu_sys_devices, logical_cpu) = NULL;
82 return;
83 }
84
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/* Forward a write to /sys/devices/system/cpu/probe to the arch hook. */
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}

/* Forward a write to /sys/devices/system/cpu/release to the arch hook. */
static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

/* Write-only (root) control files; reads are not supported. */
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
105
106 #endif /* CONFIG_HOTPLUG_CPU */
107
/*
 * The "cpu" subsystem bus: one device per logical CPU under
 * /sys/devices/system/cpu.  Online/offline callbacks are only wired up
 * when CPU hotplug is configured.
 */
struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
	.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
118
119 #ifdef CONFIG_KEXEC
120 #include <linux/kexec.h>
121
122 static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
123 char *buf)
124 {
125 struct cpu *cpu = container_of(dev, struct cpu, dev);
126 ssize_t rc;
127 unsigned long long addr;
128 int cpunum;
129
130 cpunum = cpu->dev.id;
131
132 /*
133 * Might be reading other cpu's data based on which cpu read thread
134 * has been scheduled. But cpu data (memory) is allocated once during
135 * boot up and this data does not change there after. Hence this
136 * operation should be safe. No locking required.
137 */
138 addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
139 rc = sprintf(buf, "%Lx\n", addr);
140 return rc;
141 }
142 static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
143
144 static ssize_t show_crash_notes_size(struct device *dev,
145 struct device_attribute *attr,
146 char *buf)
147 {
148 ssize_t rc;
149
150 rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
151 return rc;
152 }
153 static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
154 #endif
155
/*
 * Print cpu online, possible, present, and system maps
 */

/* Pairs a sysfs attribute with the cpumask it renders. */
struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const * const map;	/* indirection: masks may be swapped at runtime */
};
164
165 static ssize_t show_cpus_attr(struct device *dev,
166 struct device_attribute *attr,
167 char *buf)
168 {
169 struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
170 int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));
171
172 buf[n++] = '\n';
173 buf[n] = '\0';
174 return n;
175 }
176
/* Build a read-only cpu_attr entry that prints @map as a cpulist. */
#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &cpu_online_mask),
	_CPU_ATTR(possible, &cpu_possible_mask),
	_CPU_ATTR(present, &cpu_present_mask),
};
186
187 /*
188 * Print values for NR_CPUS and offlined cpus
189 */
190 static ssize_t print_cpus_kernel_max(struct device *dev,
191 struct device_attribute *attr, char *buf)
192 {
193 int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
194 return n;
195 }
196 static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
197
/*
 * arch-optional setting to enable display of offline cpus >= nr_cpu_ids;
 * 0 (the default) means no such cpus are displayed.
 */
unsigned int total_cpus;
200
201 static ssize_t print_cpus_offline(struct device *dev,
202 struct device_attribute *attr, char *buf)
203 {
204 int n = 0, len = PAGE_SIZE-2;
205 cpumask_var_t offline;
206
207 /* display offline cpus < nr_cpu_ids */
208 if (!alloc_cpumask_var(&offline, GFP_KERNEL))
209 return -ENOMEM;
210 cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
211 n = cpulist_scnprintf(buf, len, offline);
212 free_cpumask_var(offline);
213
214 /* display offline cpus >= nr_cpu_ids */
215 if (total_cpus && nr_cpu_ids < total_cpus) {
216 if (n && n < len)
217 buf[n++] = ',';
218
219 if (nr_cpu_ids == total_cpus-1)
220 n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
221 else
222 n += snprintf(&buf[n], len - n, "%d-%d",
223 nr_cpu_ids, total_cpus-1);
224 }
225
226 n += snprintf(&buf[n], len - n, "\n");
227 return n;
228 }
229 static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
230
/* Intentionally empty release callback for statically allocated cpu devices. */
static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us. Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publically ridiculed for doing
	 * something as foolish as this. However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices. The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}
249
250 /*
251 * register_cpu - Setup a sysfs device for a CPU.
252 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
253 * sysfs for this CPU.
254 * @num - CPU number to use when creating the device.
255 *
256 * Initialize and register the CPU device.
257 */
258 int __cpuinit register_cpu(struct cpu *cpu, int num)
259 {
260 int error;
261
262 cpu->node_id = cpu_to_node(num);
263 memset(&cpu->dev, 0x00, sizeof(struct device));
264 cpu->dev.id = num;
265 cpu->dev.bus = &cpu_subsys;
266 cpu->dev.release = cpu_device_release;
267 cpu->dev.offline_disabled = !cpu->hotpluggable;
268 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
269 cpu->dev.bus->uevent = arch_cpu_uevent;
270 #endif
271 error = device_register(&cpu->dev);
272 if (!error)
273 per_cpu(cpu_sys_devices, num) = &cpu->dev;
274 if (!error)
275 register_cpu_under_node(num, cpu_to_node(num));
276
277 #ifdef CONFIG_KEXEC
278 if (!error)
279 error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
280 if (!error)
281 error = device_create_file(&cpu->dev,
282 &dev_attr_crash_notes_size);
283 #endif
284 return error;
285 }
286
287 struct device *get_cpu_device(unsigned cpu)
288 {
289 if (cpu < nr_cpu_ids && cpu_possible(cpu))
290 return per_cpu(cpu_sys_devices, cpu);
291 else
292 return NULL;
293 }
294 EXPORT_SYMBOL_GPL(get_cpu_device);
295
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
/* Per-cpu read-only "modalias" file, rendered by the architecture. */
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif
299
/* Files created directly under /sys/devices/system/cpu. */
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,	/* online */
	&cpu_attrs[1].attr.attr,	/* possible */
	&cpu_attrs[2].attr.attr,	/* present */
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

/* NULL-terminated group list handed to subsys_system_register(). */
static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};
324
325 bool cpu_is_hotpluggable(unsigned cpu)
326 {
327 struct device *dev = get_cpu_device(cpu);
328 return dev && container_of(dev, struct cpu, dev)->hotpluggable;
329 }
330 EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
331
#ifdef CONFIG_GENERIC_CPU_DEVICES
/* Statically allocated per-cpu device structures for archs that opt in. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

/*
 * Register a cpu device for every possible CPU when the architecture
 * uses the generic per-cpu devices; a no-op otherwise.  A registration
 * failure this early is unrecoverable, hence the panic.
 */
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}
347
/*
 * Called from driver core init: register the cpu subsystem (with its
 * root attribute files) and then the generic per-cpu devices.
 */
void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
}