/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};
#define MB(x)	((x) * 1024)
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
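
/*
 * Worked example (illustrative, not part of the original source): a CPUID(2)
 * descriptor byte of 0x2c is matched against cache_table[] above and
 * contributes a 32 KB, 8-way L1 data cache with 64-byte lines to the size
 * totals accumulated in init_intel_cacheinfo() below.
 */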
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};
struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
static const unsigned short assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
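
/*
 * Illustrative note (not part of the original source): assocs[] maps the raw
 * 4-bit associativity field from CPUID 0x80000006 to a way count; e.g. a raw
 * field of 0x6 decodes to 8 ways, and 0xf marks a fully associative cache.
 */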
static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
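
/*
 * Worked example (illustrative, not part of the original source): for a
 * 64 KB, 2-way L1 data cache with 64-byte lines, amd_cpuid4() reports
 * number_of_sets = (64 * 1024) / 64 / 2 - 1 = 511, matching the minus-one
 * encoding that real CPUID(4) hardware uses for the EBX/ECX fields.
 */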
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};
#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
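
/*
 * Illustrative example (not part of the original source): if all four
 * subcaches are enabled, sc0 = sc1 = sc2 = sc3 = 1 and
 * l3->indices = (1 << 10) - 1 = 1023, so indices 0..1023 are valid
 * arguments for the index-disable interface below.
 */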
static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 *  disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}
/*
 * disable a L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/*  check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warning("L3 slot %d in use/index already disabled!\n",
				   slot);
		return err;
	}
	return count;
}
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}
static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}
static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (cpu_has_topoext)
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
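
/*
 * Worked example (illustrative, not part of the original source): a leaf
 * reporting 511 for number_of_sets, 63 for coherency_line_size, 0 for
 * physical_line_partition and 7 for ways_of_associativity decodes to
 * (511 + 1) * (63 + 1) * (0 + 1) * (7 + 1) = 512 * 64 * 8 = 262144 bytes,
 * i.e. the 256 KB later shown in sysfs as "size".
 */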
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
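
/*
 * Illustrative note (not part of the original source): on a part with
 * separate L1i/L1d plus unified L2 and L3, subleaves 0..3 report valid cache
 * types and subleaf 4 reports CACHE_TYPE_NULL, so the loop above returns 4.
 */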
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{

	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}
unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;

			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			default:
				break;
			}
		}
	}
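
	/*
	 * Worked example (illustrative, not part of the original source): if
	 * an L3 is shared by 16 threads, num_threads_sharing = 16 and
	 * index_msb = 4, so l3_id = c->apicid & ~0xf; all 16 logical CPUs
	 * whose APIC IDs differ only in the low 4 bits get the same cache ID.
	 */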
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

#ifdef CONFIG_X86_HT
	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
	 * turns means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
	 * c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
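
/*
 * Illustrative note (not part of the original source): CPUID4_INFO_IDX(2, 1)
 * expands to the address of leaf 1 (e.g. the L1i leaf on a typical part)
 * inside the per-CPU _cpuid4_info array allocated for CPU 2.
 */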
#ifdef CONFIG_SMP

static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int i, sibling;

	if (cpu_has_topoext) {
		unsigned int apicid, nshared, first, last;

		if (!per_cpu(ici_cpuid4_info, cpu))
			return 0;

		this_leaf = CPUID4_INFO_IDX(cpu, index);
		nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}
static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))
			return;
	}

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}
static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
static void free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}
static void get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
static int detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
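
/*
 * Illustrative note (not part of the original source): the "+ val" offset in
 * the show_one_plus() instances below undoes CPUID(4)'s minus-one encoding,
 * e.g. a raw ways_of_associativity field of 7 is shown in sysfs as 8; only
 * "level" is reported verbatim (val == 0).
 */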
show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};
#ifdef CONFIG_AMD_NB
static struct attribute **amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}
static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
, NR_CPUS
);
1145 /* Add/Remove cache interface for CPU device */
1146 static int cache_add_dev(struct device
*dev
)
1148 unsigned int cpu
= dev
->id
;
1150 struct _index_kobject
*this_object
;
1151 struct _cpuid4_info
*this_leaf
;
1154 retval
= cpuid4_cache_sysfs_init(cpu
);
1155 if (unlikely(retval
< 0))
1158 retval
= kobject_init_and_add(per_cpu(ici_cache_kobject
, cpu
),
1159 &ktype_percpu_entry
,
1160 &dev
->kobj
, "%s", "cache");
1162 cpuid4_cache_sysfs_exit(cpu
);
1166 for (i
= 0; i
< num_cache_leaves
; i
++) {
1167 this_object
= INDEX_KOBJECT_PTR(cpu
, i
);
1168 this_object
->cpu
= cpu
;
1169 this_object
->index
= i
;
1171 this_leaf
= CPUID4_INFO_IDX(cpu
, i
);
1173 ktype_cache
.default_attrs
= default_attrs
;
1174 #ifdef CONFIG_AMD_NB
1175 if (this_leaf
->base
.nb
)
1176 ktype_cache
.default_attrs
= amd_l3_attrs();
1178 retval
= kobject_init_and_add(&(this_object
->kobj
),
1180 per_cpu(ici_cache_kobject
, cpu
),
1182 if (unlikely(retval
)) {
1183 for (j
= 0; j
< i
; j
++)
1184 kobject_put(&(INDEX_KOBJECT_PTR(cpu
, j
)->kobj
));
1185 kobject_put(per_cpu(ici_cache_kobject
, cpu
));
1186 cpuid4_cache_sysfs_exit(cpu
);
1189 kobject_uevent(&(this_object
->kobj
), KOBJ_ADD
);
1191 cpumask_set_cpu(cpu
, to_cpumask(cache_dev_map
));
1193 kobject_uevent(per_cpu(ici_cache_kobject
, cpu
), KOBJ_ADD
);
static void cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
static int __init cache_sysfs_init(void)
{
	int i, err = 0;

	if (num_cache_leaves == 0)
		return 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
		if (err)
			goto out;
	}
	__register_hotcpu_notifier(&cacheinfo_cpu_notifier);

out:
	cpu_notifier_register_done();
	return err;
}

device_initcall(cache_sysfs_init);

#endif	/* CONFIG_SYSFS */