/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

int arch_register_cpu(int num)
{
        struct node *parent = NULL;

#ifdef CONFIG_NUMA
        parent = &node_devices[cpu_to_node(num)];
#endif /* CONFIG_NUMA */

#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
        /*
         * If CPEI cannot be re-targeted and this CPU is the current
         * CPEI target, don't create the control file.
         */
        if (!can_cpei_retarget() && is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.no_control = 1;
#endif

        return register_cpu(&sysfs_cpus[num].cpu, num, parent);
}
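
/*
 * Note: register_cpu() honours no_control by not creating the per-cpu
 * "online" control file, so the current CPEI target cannot be taken
 * offline through sysfs when CPEI cannot be re-targeted.
 */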

#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
        struct node *parent = NULL;

#ifdef CONFIG_NUMA
        int node = cpu_to_node(num);
        parent = &node_devices[node];
#endif /* CONFIG_NUMA */

        unregister_cpu(&sysfs_cpus[num].cpu, parent);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus) {
                err = -ENOMEM;
                goto out;
        }

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);
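
/*
 * topology_init() above gives every present CPU (and, with CONFIG_NUMA,
 * every online node) a sysfs presence, i.e. the familiar
 * /sys/devices/system/cpu/cpuN and /sys/devices/system/node/nodeN
 * directories.  The rest of this file hangs per-CPU cache information
 * off those cpuN entries.
 */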

/*
 * Export cpu cache information through sysfs
 */

/*
 * A bunch of string arrays for pretty printing
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"               /* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",                     /* reserved */
        ""                      /* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x,y)   (&all_cpu_cache_info[x].cache_leaves[y])

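/*
 * Roughly, the objects above end up exported under the per-CPU sysfs
 * directory (the exact attribute set is given by cache_default_attrs
 * below):
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *		index0/{level,type,size,coherency_line_size,
 *			ways_of_associativity,number_of_sets,
 *			attributes,shared_cpu_map}
 *		index1/...
 *
 * Each indexM directory is backed by one struct cache_info leaf and the
 * "cache" directory by the cpu_cache_info kobject.
 */
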
#ifdef CONFIG_SMP
/*
 * Build the mask of logical CPUs that share this cache leaf.  PAL is
 * queried once per sharing processor; each call reports the core id and
 * thread id of one logical CPU sharing the cache at this level/type.
 */
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info * this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        i,
                                        &csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info * this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
        return;
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

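/*
 * number_of_sets above is derived as
 *
 *	sets = cache_size / (associativity * line_size)
 *
 * with line_size = 1 << pcci_line_size.  For example, a hypothetical
 * 256 KB, 8-way cache with 64-byte lines would report
 * 262144 / (8 * 64) = 512 sets.
 */
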
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
        len += sprintf(buf+len, "\n");
        return len;
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
#undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
                __ATTR(_name, 0444, show_##_name, NULL)

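/*
 * So, for instance, define_one_ro(size) expands (via the generic __ATTR()
 * helper) to roughly:
 *
 *	static struct cache_attr size = {
 *		.attr  = { .name = "size", .mode = 0444 },
 *		.show  = show_size,
 *		.store = NULL,
 *	};
 */
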
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute * cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k)	container_of(k, struct cache_info, kobj)
#define to_attr(a)	container_of(a, struct cache_attr, attr)

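/*
 * cache_show() below uses these helpers to map the kobject and attribute
 * handed in by sysfs back to the enclosing cache_info leaf and cache_attr
 * descriptor before dispatching to the matching show_*() routine.
 */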
static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
        .show   = cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

/*
 * The per-cpu "cache" directory itself carries no attributes; only the
 * indexN leaf objects below it use cache_default_attrs.
 */
static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
        return;
}

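/*
 * Query PAL for the cache hierarchy of the current CPU and allocate one
 * cache_info leaf per (level, type) pair found.  The PAL calls describe
 * the CPU they execute on, which is why cache_add_dev() below temporarily
 * binds the caller to the target CPU before calling this.
 */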
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        u64 i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        s64 status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed(current, oldmask);
        if (unlikely(retval < 0))
                return retval;

        all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
        kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
        all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
        retval = kobject_register(&all_cpu_cache_info[cpu].kobj);

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu,i);
                this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
                kobject_set_name(&(this_object->kobj), "index%1lu", i);
                this_object->kobj.ktype = &cache_ktype;
                retval = kobject_register(&(this_object->kobj));
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_unregister(
                                        &(LEAF_KOBJECT_PTR(cpu,j)->kobj));
                        }
                        kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        break;
                }
        }
        return retval;
}

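/*
 * After cache_add_dev() has run for a CPU, userspace can read the leaves,
 * e.g. (hypothetical values):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/level
 *	1
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/type
 *	Data
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/size
 *	16K
 */
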
/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, add or remove its cache kobjects as
 * appropriate.
 */
static int cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block cache_cpu_notifier =
{
        .notifier_call = cache_cpu_callback
};

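/*
 * At boot, replay CPU_ONLINE for every CPU that is already up so their
 * cache directories get created, then register the notifier so CPUs
 * hot-added later are handled the same way.
 */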
static int __cpuinit cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
                                (void *)(long)i);
        }

        register_cpu_notifier(&cache_cpu_notifier);

        return 0;
}

device_initcall(cache_sysfs_init);