/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
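/*
 * Attach a device_node to each cache leaf: level 1 leaves take a
 * reference on the CPU node itself, while higher levels follow the
 * "next-level-cache" phandle chain via of_find_next_cache_node().
 */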
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;

        /* skip if of_node is already populated */
        if (this_cpu_ci->info_list->of_node)
                return 0;

        if (!cpu_dev) {
                pr_err("No cpu device for CPU %d\n", cpu);
                return -ENODEV;
        }
        np = cpu_dev->of_node;
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np);/* cpu node itself */
                if (!np)
                        break;
                this_leaf->of_node = np;
                index++;
        }

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        return sib_leaf->of_node == this_leaf->of_node;
}
/* OF properties to query for a given cache type */
struct cache_type_info {
        const char *size_prop;
        const char *line_size_props[2];
        const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
        {
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        }, {
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        }, {
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
        if (type == CACHE_TYPE_UNIFIED)
                return 0;
        return type;
}
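/*
 * Read the size property for this leaf's cache type from the device
 * tree and, if present, record it in bytes in this_leaf->size.
 */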
static void cache_size(struct cacheinfo *this_leaf)
{
        const char *propname;
        const __be32 *cache_size;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;

        cache_size = of_get_property(this_leaf->of_node, propname, NULL);
        if (cache_size)
                this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
        const __be32 *line_size;
        int i, lim, ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[ct_idx].line_size_props[i];
                line_size = of_get_property(this_leaf->of_node, propname, NULL);
                if (line_size)
                        break;
        }

        if (line_size)
                this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf)
{
        const char *propname;
        const __be32 *nr_sets;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;

        nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
        if (nr_sets)
                this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
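/*
 * Derive the ways of associativity from the other properties:
 * ways = size / (number_of_sets * line_size). A cache with a single
 * set is fully associative, so ways_of_associativity stays 0.
 */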
static void cache_associativity(struct cacheinfo *this_leaf)
{
        unsigned int line_size = this_leaf->coherency_line_size;
        unsigned int nr_sets = this_leaf->number_of_sets;
        unsigned int size = this_leaf->size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
static void cache_of_override_properties(unsigned int cpu)
{
        int index;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                cache_size(this_leaf);
                cache_get_line_size(this_leaf);
                cache_nr_sets(this_leaf);
                cache_associativity(this_leaf);
        }
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT systems, assume unique level 1 cache, system-wide
         * shared caches for all other levels. This will be used only if
         * arch specific code has not populated shared_cpu_map
         */
        return !(this_leaf->level == 1);
}
#endif
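/*
 * Build shared_cpu_map for every leaf of this CPU by comparing its
 * leaves against those of all other online CPUs; matching leaves are
 * marked as shared in both directions.
 */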
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret = 0;

        if (this_cpu_ci->cpu_map_populated)
                return 0;

        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
                /* No cache property/hierarchy support yet in ACPI */
                ret = -ENOTSUPP;
        if (ret)
                return ret;

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = this_cpu_ci->info_list + index;
                /* skip if shared_cpu_map is already populated */
                if (!cpumask_empty(&this_leaf->shared_cpu_map))
                        continue;

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue;/* skip if itself or no cacheinfo */
                        sib_leaf = sib_cpu_ci->info_list + index;
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
        }

        return 0;
}
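/*
 * Undo cache_shared_cpu_map_setup() for a departing CPU: clear it from
 * every sibling's masks and drop the of_node references taken earlier.
 */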
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci;

                        if (sibling == cpu) /* skip itself */
                                continue;

                        sib_cpu_ci = get_cpu_cacheinfo(sibling);
                        if (!sib_cpu_ci->info_list)
                                continue;

                        sib_leaf = sib_cpu_ci->info_list + index;
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                of_node_put(this_leaf->of_node);
        }
}
static void cache_override_properties(unsigned int cpu)
{
        if (of_have_populated_dt())
                return cache_of_override_properties(cpu);
}
static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
}
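/*
 * Weak defaults; architectures provide real implementations that fill
 * in num_leaves/num_levels and the per-leaf info_list respectively.
 */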
int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}
static int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;

        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
         * For systems using DT for cache hierarchy, of_node and shared_cpu_map
         * will be set up here only if they are not populated already
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                goto free_ci;
        }

        cache_override_properties(cpu);
        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
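/*
 * show_one() expands to a sysfs show handler that prints a single
 * unsigned field of the cacheinfo leaf attached to the device.
 */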
#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sprintf(buf, "%u\n", this_leaf->object);         \
}
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}
static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, true, buf);
}
static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return -EINVAL;
        }
}
static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                n = sprintf(buf, "ReadWriteAllocate\n");
        else if (ci_attr & CACHE_READ_ALLOCATE)
                n = sprintf(buf, "ReadAllocate\n");
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                n = sprintf(buf, "WriteAllocate\n");
        return n;
}
static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sprintf(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sprintf(buf, "WriteBack\n");
        return n;
}
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};
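/*
 * Only expose attributes whose backing fields were actually populated;
 * anything still zero (or an empty cpumask) stays hidden from sysfs.
 */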
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}
static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};
static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* Place holder for private group */
        NULL,
};
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}
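/*
 * Create the cpuX/cache parent device and the array that will hold the
 * per-leaf indexY devices; on allocation failure everything created so
 * far is torn down again.
 */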
static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}
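/*
 * Register one indexY child device per cache leaf under cpuX/cache,
 * attaching the default (and any architecture-private) attribute groups.
 */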
static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = this_cpu_ci->info_list + i;
                if (this_leaf->disable_sysfs)
                        continue;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}
static void cache_remove_dev(unsigned int cpu)
{
        if (!cpumask_test_cpu(cpu, &cache_dev_map))
                return;

        cpumask_clear_cpu(cpu, &cache_dev_map);
        cpu_cache_sysfs_exit(cpu);
}
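/*
 * CPU hotplug: build cacheinfo and the sysfs nodes when a CPU comes
 * online, and tear both down again when it goes dead.
 */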
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        int rc = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                rc = detect_cache_attributes(cpu);
                if (!rc)
                        rc = cache_add_dev(cpu);
                break;
        case CPU_DEAD:
                cache_remove_dev(cpu);
                free_cache_attributes(cpu);
                break;
        }
        return notifier_from_errno(rc);
}
static int __init cacheinfo_sysfs_init(void)
{
        int cpu, rc = 0;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                rc = detect_cache_attributes(cpu);
                if (rc)
                        goto out;
                rc = cache_add_dev(cpu);
                if (rc) {
                        free_cache_attributes(cpu);
                        pr_err("error populating cacheinfo..cpu%d\n", cpu);
                        goto out;
                }
        }
        __hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
        cpu_notifier_register_done();
        return rc;
}

device_initcall(cacheinfo_sysfs_init);