/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);

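/* Per-cpu pointers to each cpu's registered device, looked up by get_cpu_device() */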
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
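/* Move a cpu's sysfs links from one NUMA node to another when its node changes */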
static void change_cpu_under_node(struct cpu *cpu,
			unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;
	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}

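/*
 * Back the per-cpu "online" attribute: reading it reports whether the cpu is
 * up; writing '0' or '1' offlines or onlines it, e.g.
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 */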
static ssize_t show_online(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;
	int from_nid, to_nid;
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpuid);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		from_nid = cpu_to_node(cpuid);
		ret = cpu_up(cpuid);

		/*
		 * When hot adding memory to a memoryless node and enabling a
		 * cpu on that node, the node number of the cpu may change
		 * internally.
		 */
		to_nid = cpu_to_node(cpuid);
		if (from_nid != to_nid)
			change_cpu_under_node(cpu, from_nid, to_nid);

		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);

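/* Create the "online" control file for cpus registered as hotpluggable */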
static void __cpuinit register_cpu_control(struct cpu *cpu)
{
	device_create_file(&cpu->dev, &dev_attr_online);
}
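/* Tear down the sysfs device for a cpu, undoing register_cpu() */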
void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->dev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	device_remove_file(&cpu->dev, &dev_attr_online);

	device_unregister(&cpu->dev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
	return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
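/* Arch-specific hooks for probing (adding) and releasing (removing) cpus via sysfs writes */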
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

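/*
 * Expose the physical address of this cpu's crash notes (ELF note buffer) so
 * that kdump userspace can locate the per-cpu notes when setting up kexec.
 */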
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->dev.id;

	/*
	 * Might be reading another cpu's data depending on which cpu the
	 * reading thread has been scheduled on. But cpu data (memory) is
	 * allocated once during boot up and does not change thereafter, so
	 * this operation is safe. No locking required.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t rc;

	rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
	return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);

static struct attribute *crash_note_cpu_attrs[] = {
	&dev_attr_crash_notes.attr,
	&dev_attr_crash_notes_size.attr,
	NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
	.attrs = crash_note_cpu_attrs,
};
#endif

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

#define _CPU_ATTR(name, map)					\
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &cpu_online_mask),
	_CPU_ATTR(possible, &cpu_possible_mask),
	_CPU_ATTR(present, &cpu_present_mask),
};

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = cpulist_scnprintf(buf, len, offline);
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%d-%d",
				      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us.  Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publicly ridiculed for doing
	 * something as foolish as this.  However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices.  The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *	  sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	memset(&cpu->dev, 0x00, sizeof(struct device));
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
	cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
	cpu->dev.groups = common_cpu_attr_groups;
	error = device_register(&cpu->dev);
	if (!error && cpu->hotpluggable)
		register_cpu_control(cpu);
	if (!error)
		per_cpu(cpu_sys_devices, num) = &cpu->dev;
	if (!error)
		register_cpu_under_node(num, cpu_to_node(num));

	return error;
}

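/* Return the device registered for @cpu, or NULL if @cpu is not possible or not yet registered */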
struct device *get_cpu_device(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif

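/* Attributes exposed at the cpu subsystem root, /sys/devices/system/cpu/ */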
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};

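/* True if the cpu has a registered device and was marked hotpluggable */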
bool cpu_is_hotpluggable(unsigned cpu)
{
	struct device *dev = get_cpu_device(cpu);
	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

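/*
 * Architectures that select CONFIG_GENERIC_CPU_DEVICES get a statically
 * allocated struct cpu per possible cpu registered here, instead of
 * registering cpu devices from arch code.
 */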
#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}

void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
}