/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}
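/*
 * With a device tree, two cache leaves describe the same physical cache
 * when their cacheinfo entries carry the same device_node as fw_token.
 */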
#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}
/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}
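/*
 * Look up the type-specific "cache-size"/"i-cache-size"/"d-cache-size"
 * property for this leaf and record it, leaving size untouched if absent.
 */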
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(np, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(np, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
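/*
 * Record the number of sets from the type-specific "*-cache-sets"
 * property, if the device tree provides one.
 */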
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(np, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
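/*
 * Derive the associativity from the other properties:
 * ways = size / (number_of_sets * line_size). A set count of 1 denotes
 * a fully associative cache, for which ways is left at 0.
 */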
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}
static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}
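/*
 * Walk the cache hierarchy for one CPU: level 1 leaves use the CPU node
 * itself, higher levels follow the next-level-cache links. Each leaf's
 * fw_token is set to its device_node so sharing can be detected later.
 */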
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token) {
		return 0;
	}

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);	/* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}
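/*
 * Build the per-leaf shared_cpu_map: after firmware parsing, mark this
 * CPU in its own leaves and cross-link every online sibling whose leaf
 * at the same index is backed by the same cache.
 */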
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;	/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}
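/*
 * Undo cache_shared_cpu_map_setup() for a departing CPU: clear it from
 * every sibling's masks and drop the DT node references taken above.
 */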
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}
static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}
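/*
 * Weak stubs: architectures provide their own init_cache_level() and
 * populate_cache_leaves() to fill in levels and per-leaf attributes.
 */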
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
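/*
 * Allocate the info_list for a CPU, let the architecture populate the
 * leaves, then fill in whatever firmware (DT/ACPI) still has to add.
 */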
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves and
	 * shared_cpu_map, or it may leave them partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])
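/*
 * show_one() expands to a sysfs show routine that prints a single
 * unsigned field of the cacheinfo leaf attached to the device.
 */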
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}
static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}
static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}
static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};
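/*
 * Hide an attribute when detection left its backing field unset, so
 * sysfs only exposes values the platform actually reported.
 */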
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}
static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};
static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}
/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
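/*
 * Create one "indexN" child device per cache leaf under cpuX/cache,
 * attaching the default attribute groups plus any arch private group.
 */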
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}
static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}
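/*
 * Register dynamic CPU hotplug callbacks so the sysfs hierarchy is
 * created as CPUs come online and torn down before they go offline.
 */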
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);