/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
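
/*
 * Example of how this table is consumed (illustrative): cpuid(2), parsed
 * below in init_intel_cacheinfo(), returns these one-byte descriptors
 * packed four to a register; descriptor 0x2c, for instance, decodes via
 * this table to a 32 KB L1 data cache (8-way, 64 byte lines).
 */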


enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
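
/*
 * Note: num_threads_sharing and num_cores_on_die hold "count - 1"
 * encodings; the consumers below add the 1 back (see, e.g.,
 * init_intel_cacheinfo() and cache_shared_cpu_map_setup()).
 */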

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that are currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
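
/*
 * Example, per the assocs[] table above: a raw AMD assoc field of 0x6
 * decodes to 8-way set associative; 0xf denotes fully associative.
 */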

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;


	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
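
/*
 * Worked example for the number_of_sets computation above (illustrative):
 * a 64 KB, 2-way L1 with 64 byte lines has (64 * 1024) / 64 / 2 = 512
 * sets, stored CPUID-style as 511.
 */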

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
 * L3 cache descriptors
 */
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
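
/*
 * Illustrative: with all four subcaches enabled (sc0..sc3 == 1), the
 * calculation above gives indices = (1 << 10) - 1 = 1023.
 */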

static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: AMD northbridge containing the L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we disable, therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable a L3 cache index by using a disable-slot
 *
 * @nb: AMD northbridge containing the L3 cache descriptor
 * @cpu: A CPU on the node containing the L3 cache
 * @slot: slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warning("L3 slot %d in use/index already disabled!\n",
				   slot);
		return err;
	}
	return count;
}
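
/*
 * Usage sketch (the sysfs path follows from the kobject names registered
 * further down; the index value 12 is arbitrary):
 *   echo 12 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 * Reading the file back shows the disabled index, or "FREE".
 */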

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (cpu_has_topoext)
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
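
/*
 * Worked example for the size computation above (illustrative): 64 sets,
 * 64 byte lines, 1 physical line partition and 8 ways give
 * 64 * 64 * 1 * 8 = 32768 bytes, i.e. a typical 32 KB L1d.
 */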

static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
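
/*
 * E.g. a CPU exposing L1d, L1i, L2 and L3 reports CACHE_TYPE_NULL at
 * subleaf 4, so the loop above returns 4 cache leaves.
 */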

void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
{

	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid & ~((1 << index_msb) - 1);
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid & ~((1 << index_msb) - 1);
					break;
				default:
					break;
				}
			}
		}
	}
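	/*
	 * Illustrative: with num_threads_sharing == 4 and apicid 5,
	 * index_msb is 2, so l3_id = 5 & ~3 = 4; every thread sharing
	 * that L3 derives the same id.
	 */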
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int i, sibling;

	if (cpu_has_topoext) {
		unsigned int apicid, nshared, first, last;

		if (!per_cpu(ici_cpuid4_info, cpu))
			return 0;

		this_leaf = CPUID4_INFO_IDX(cpu, index);
		nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}
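
/*
 * Illustrative: with nshared == 2 and apicid 5, first = 5 - (5 % 2) = 4
 * and last = 5, so the sharing window above covers apicids 4..5.
 */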

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))
			return;
	}

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}
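
/*
 * Note: cpuid must execute on the CPU being queried, hence the
 * smp_call_function_single() above instead of calling get_cpu_leaves()
 * directly.
 */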

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}
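
/*
 * type == 0 prints a hexadecimal bitmask, type == 1 a CPU range list
 * such as "0-3"; these back the shared_cpu_map and shared_cpu_list
 * attributes below.
 */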

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#ifdef CONFIG_AMD_NB
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
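
/*
 * Resulting sysfs layout (illustrative):
 *   /sys/devices/system/cpu/cpuN/cache/indexY/{type,level,size,...}
 * with one indexY directory per detected cache leaf.
 */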

static void __cpuinit cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif