]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/base/cpu.c
Merge branch 'akpm' (fixes from Andrew Morton)
[mirror_ubuntu-artful-kernel.git] / drivers / base / cpu.c
CommitLineData
1da177e4 1/*
8a25a2fd 2 * CPU subsystem support
1da177e4
LT
3 */
4
024f7846 5#include <linux/kernel.h>
1da177e4
LT
6#include <linux/module.h>
7#include <linux/init.h>
f6a57033 8#include <linux/sched.h>
1da177e4
LT
9#include <linux/cpu.h>
10#include <linux/topology.h>
11#include <linux/device.h>
76b67ed9 12#include <linux/node.h>
5a0e3ad6 13#include <linux/gfp.h>
fad12ac8 14#include <linux/slab.h>
9f13a1fd 15#include <linux/percpu.h>
ac212b69 16#include <linux/acpi.h>
f86e4718 17#include <linux/of.h>
1da177e4 18
a1bdc7aa 19#include "base.h"
1da177e4 20
/* Per-CPU pointer to each CPU's sysfs device; set in register_cpu(). */
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

/*
 * Bus match callback for the cpu subsystem: a driver binds to a CPU
 * device only via ACPI-style matching; everything else is rejected.
 */
static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
	/* ACPI style match is the only one that may succeed. */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	return 0;
}
31
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Re-parent a CPU's sysfs node linkage when its NUMA node changes:
 * drop the link under @from_nid, create it under @to_nid, and record
 * the new node in cpu->node_id.
 */
static void change_cpu_under_node(struct cpu *cpu,
			unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;
	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}
41
0902a904 42static int __ref cpu_subsys_online(struct device *dev)
1da177e4 43{
8a25a2fd 44 struct cpu *cpu = container_of(dev, struct cpu, dev);
0902a904
RW
45 int cpuid = dev->id;
46 int from_nid, to_nid;
c7991b0b 47 int ret = -ENODEV;
0902a904
RW
48
49 cpu_hotplug_driver_lock();
1da177e4 50
0902a904 51 from_nid = cpu_to_node(cpuid);
c7991b0b
RW
52 if (from_nid == NUMA_NO_NODE)
53 goto out;
54
0902a904
RW
55 ret = cpu_up(cpuid);
56 /*
57 * When hot adding memory to memoryless node and enabling a cpu
58 * on the node, node number of the cpu may internally change.
59 */
60 to_nid = cpu_to_node(cpuid);
61 if (from_nid != to_nid)
62 change_cpu_under_node(cpu, from_nid, to_nid);
1da177e4 63
c7991b0b 64 out:
0902a904
RW
65 cpu_hotplug_driver_unlock();
66 return ret;
1da177e4
LT
67}
68
0902a904 69static int cpu_subsys_offline(struct device *dev)
1da177e4 70{
0902a904 71 int ret;
1da177e4 72
51badebd 73 cpu_hotplug_driver_lock();
0902a904 74 ret = cpu_down(dev->id);
51badebd 75 cpu_hotplug_driver_unlock();
1da177e4
LT
76 return ret;
77}
1c4e2d70 78
76b67ed9 79void unregister_cpu(struct cpu *cpu)
1da177e4 80{
8a25a2fd 81 int logical_cpu = cpu->dev.id;
1da177e4 82
76b67ed9
KH
83 unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
84
8a25a2fd 85 device_unregister(&cpu->dev);
e37d05da 86 per_cpu(cpu_sys_devices, logical_cpu) = NULL;
1da177e4
LT
87 return;
88}
12633e80
NF
89
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/* sysfs "probe" write: forward the user buffer to the arch hot-add hook. */
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}

/* sysfs "release" write: forward the user buffer to the arch hot-remove hook. */
static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

/* Write-only (root) control files; no show methods. */
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
1da177e4
LT
110#endif /* CONFIG_HOTPLUG_CPU */
111
0902a904
RW
/*
 * The "cpu" subsystem: bus under which every per-CPU device is
 * registered (see register_cpu()).  online/offline callbacks are
 * wired up only when CPU hotplug is configured.
 */
struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
	.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
122
51be5606
VG
123#ifdef CONFIG_KEXEC
124#include <linux/kexec.h>
125
8a25a2fd 126static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
4a0b2b4d 127 char *buf)
51be5606 128{
8a25a2fd 129 struct cpu *cpu = container_of(dev, struct cpu, dev);
51be5606
VG
130 ssize_t rc;
131 unsigned long long addr;
132 int cpunum;
133
8a25a2fd 134 cpunum = cpu->dev.id;
51be5606
VG
135
136 /*
137 * Might be reading other cpu's data based on which cpu read thread
138 * has been scheduled. But cpu data (memory) is allocated once during
139 * boot up and this data does not change there after. Hence this
140 * operation should be safe. No locking required.
141 */
3b034b0d 142 addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
51be5606 143 rc = sprintf(buf, "%Lx\n", addr);
51be5606
VG
144 return rc;
145}
8a25a2fd 146static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
eca4549f
ZY
147
148static ssize_t show_crash_notes_size(struct device *dev,
149 struct device_attribute *attr,
150 char *buf)
151{
152 ssize_t rc;
153
bcfb87fb 154 rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
eca4549f
ZY
155 return rc;
156}
157static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
c055da9f
IM
158
/* Attributes exposing crash-note location/size for kexec userspace. */
static struct attribute *crash_note_cpu_attrs[] = {
	&dev_attr_crash_notes.attr,
	&dev_attr_crash_notes_size.attr,
	NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
	.attrs = crash_note_cpu_attrs,
};
#endif

/* Attribute groups attached to every CPU device (see register_cpu()). */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};
51be5606 176
1c4e2d70
IM
/*
 * Attribute groups used instead of common_cpu_attr_groups when the
 * CPU is hotpluggable (currently identical in content).
 */
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};
183
9d1fe323
MT
/*
 * Print cpu online, possible, present, and system maps
 */

/* Pairs a device attribute with the cpumask it renders. */
struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const * const map;
};

/*
 * Shared show routine for the cpumask attributes below: format the
 * mask as a cpulist and append a trailing newline (PAGE_SIZE-2
 * leaves room for the '\n' and NUL added here).
 */
static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &cpu_online_mask),
	_CPU_ATTR(possible, &cpu_possible_mask),
	_CPU_ATTR(present, &cpu_present_mask),
};
9d1fe323 214
e057d7ae
MT
215/*
216 * Print values for NR_CPUS and offlined cpus
217 */
8a25a2fd
KS
218static ssize_t print_cpus_kernel_max(struct device *dev,
219 struct device_attribute *attr, char *buf)
e057d7ae 220{
8fd2d2d5 221 int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
e057d7ae
MT
222 return n;
223}
8a25a2fd 224static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
e057d7ae
MT
225
/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

/*
 * Show the list of offline CPUs: the possible-but-not-online set,
 * optionally followed by the arch-reported range [nr_cpu_ids,
 * total_cpus-1] that lies beyond what this kernel can manage.
 */
static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = cpulist_scnprintf(buf, len, offline);
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		/* separate the two lists with a comma if both are non-empty */
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%d-%d",
						      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
e057d7ae 258
2885e25c
GKH
/* Intentionally empty release callback for statically allocated cpu devices. */
static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us. Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publically ridiculed for doing
	 * something as foolish as this. However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices. The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}
277
1da177e4 278/*
405ae7d3 279 * register_cpu - Setup a sysfs device for a CPU.
72486f1f
SS
280 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
281 * sysfs for this CPU.
1da177e4
LT
282 * @num - CPU number to use when creating the device.
283 *
284 * Initialize and register the CPU device.
285 */
a83048eb 286int register_cpu(struct cpu *cpu, int num)
1da177e4
LT
287{
288 int error;
76b67ed9 289
8a25a2fd 290 cpu->node_id = cpu_to_node(num);
29bb5d4f 291 memset(&cpu->dev, 0x00, sizeof(struct device));
8a25a2fd
KS
292 cpu->dev.id = num;
293 cpu->dev.bus = &cpu_subsys;
2885e25c 294 cpu->dev.release = cpu_device_release;
0902a904 295 cpu->dev.offline_disabled = !cpu->hotpluggable;
1001b4d4 296 cpu->dev.offline = !cpu_online(num);
f86e4718 297 cpu->dev.of_node = of_get_cpu_node(num, NULL);
fad12ac8
TR
298#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
299 cpu->dev.bus->uevent = arch_cpu_uevent;
300#endif
c055da9f 301 cpu->dev.groups = common_cpu_attr_groups;
1c4e2d70
IM
302 if (cpu->hotpluggable)
303 cpu->dev.groups = hotplugable_cpu_attr_groups;
8a25a2fd 304 error = device_register(&cpu->dev);
ad74557a 305 if (!error)
8a25a2fd 306 per_cpu(cpu_sys_devices, num) = &cpu->dev;
76b67ed9
KH
307 if (!error)
308 register_cpu_under_node(num, cpu_to_node(num));
51be5606 309
1da177e4
LT
310 return error;
311}
312
8a25a2fd 313struct device *get_cpu_device(unsigned cpu)
ad74557a 314{
e37d05da
MT
315 if (cpu < nr_cpu_ids && cpu_possible(cpu))
316 return per_cpu(cpu_sys_devices, cpu);
ad74557a
AR
317 else
318 return NULL;
319}
8a25a2fd
KS
320EXPORT_SYMBOL_GPL(get_cpu_device);
321
fad12ac8
TR
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

/* Attributes attached to the cpu subsystem root (/sys/devices/system/cpu). */
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

/* Passed to subsys_system_register() in cpu_dev_init(). */
static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};
1da177e4 350
2987557f
JT
351bool cpu_is_hotpluggable(unsigned cpu)
352{
7affca35
LT
353 struct device *dev = get_cpu_device(cpu);
354 return dev && container_of(dev, struct cpu, dev)->hotpluggable;
2987557f
JT
355}
356EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
357
9f13a1fd
BH
#ifdef CONFIG_GENERIC_CPU_DEVICES
/* Static per-cpu device structs for arches without their own CPU devices. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

/*
 * Register a generic device for every possible CPU; no-op unless
 * CONFIG_GENERIC_CPU_DEVICES is enabled.  Registration failure at
 * boot is fatal.
 */
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}
373
/*
 * Boot-time initialization: register the cpu subsystem (with its root
 * attribute groups) and then the generic per-CPU devices.  Either
 * failure is fatal this early in boot.
 */
void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
}