/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

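/*
 * Attach a device tree node to every cache leaf of @cpu: a level 1
 * leaf references the CPU node itself, deeper leaves walk to the next
 * cache node via of_find_next_cache_node(). Fails with -ENOENT if the
 * hierarchy described in the DT runs out of nodes before all leaves
 * are covered.
 */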
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);	/* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

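/*
 * Build shared_cpu_map for every leaf of @cpu by comparing each leaf
 * against the same-index leaf of every other online CPU; two leaves
 * that compare as shared get each other's CPU set in their masks.
 */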
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret;

	ret = cache_setup_of_node(cpu);
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;	/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

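/*
 * Undo cache_shared_cpu_map_setup() for a departing @cpu: drop it from
 * every sibling's mask, drop each sibling from this CPU's masks, and
 * release the of_node references taken during setup.
 */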
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;
			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

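/*
 * Weak stubs: each architecture is expected to override these to fill
 * in the number of cache levels/leaves and the per-leaf attributes.
 * Returning -ENOENT here keeps the interface disabled otherwise.
 */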
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

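/*
 * Populate the cacheinfo for @cpu: query the architecture for the
 * leaf count, allocate the info_list, fill in the leaves, then derive
 * of_node and shared_cpu_map from the device tree wherever the
 * architecture left them unset.
 */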
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
			cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

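/*
 * show_one() expands to a sysfs show() method printing one unsigned
 * field of the cacheinfo leaf that cpu_device_create() stashed in the
 * device's drvdata.
 */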
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

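/*
 * Only expose attributes whose backing field was actually populated;
 * a leaf that never set, say, number_of_sets simply won't have that
 * file. ways_of_associativity is keyed on size rather than on itself
 * so that a value of 0 can still be shown to mean fully associative.
 */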
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

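/*
 * Architectures can override this weak hook to attach one extra,
 * arch-private attribute group per leaf; it is spliced into the
 * placeholder slot of cache_private_groups above.
 */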
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

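/*
 * Create a cpuX/cache/indexY device for every leaf (skipping any
 * marked disable_sysfs), attaching the default plus any private
 * attribute groups; on failure everything registered so far for this
 * CPU is torn down again.
 */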
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;
	cpumask_clear_cpu(cpu, &cache_dev_map);

	cpu_cache_sysfs_exit(cpu);
}

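/*
 * CPU hotplug callback: build cacheinfo and the sysfs nodes when a
 * CPU comes online, and tear both down again when it dies.
 */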
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		if (per_cpu_cacheinfo(cpu))
			free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

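/*
 * Boot-time init: populate cacheinfo and sysfs for every CPU already
 * online, then register the hotplug notifier, all within the
 * cpu_notifier_register_begin/done section so that no online/offline
 * transition slips in between.
 */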
static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo for cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);