/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },		/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },		/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
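
/*
 * For example, a descriptor byte of 0x43 returned by cpuid(2) resolves
 * via this table to { 0x43, LVL_2, 512 }, i.e. a 512 KB L2 cache.
 * Sizes are in KB, so the MB(x) entries expand to x * 1024 KB.
 */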

enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

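/*
 * The ebx/ecx fields above use the CPUID leaf 4 "minus one" encoding,
 * so the total cache size in bytes is:
 *
 *	(number_of_sets + 1) * (coherency_line_size + 1) *
 *	(physical_line_partition + 1) * (ways_of_associativity + 1)
 *
 * For instance, raw values 63/63/0/7 decode to 64 sets * 64-byte lines
 * * 1 partition * 8 ways = 32 KB.
 */
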
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_northbridge *nb;
};

struct _cpuid4_info {
	struct _cpuid4_info_regs base;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 is not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };

static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
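
/*
 * Sanity check of the number_of_sets arithmetic above: a 512 KB, 16-way
 * cache with 64-byte lines gives 512 * 1024 / 64 / 16 - 1 = 511, i.e.
 * 512 sets, matching the "minus one" encoding of the real leaf 4.
 */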

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}
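
/*
 * Roughly: each BIT() tested above flags a disabled portion of one of
 * the four subcaches, so sc0..sc3 count the portions still enabled.
 * The largest count scaled by 1K (<< 10), minus one, is the highest
 * usable L3 index; amd_set_l3_disable_slot() rejects anything beyond it.
 */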

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: northbridge descriptor for the node containing the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we disable, therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable an L3 cache index by using a disable-slot
 *
 * @nb: northbridge descriptor for the node containing the L3 cache
 * @cpu: A CPU on the node containing the L3 cache
 * @slot: slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
			    unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warning("L3 slot %d in use/index already disabled!\n",
				   slot);
		return err;
	}
	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
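
/*
 * Usage sketch (the index value 42 is just an example): with
 * CAP_SYS_ADMIN, writing
 *
 *	echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * goes through store_cache_disable_0() and disables L3 index 42 via
 * slot 0; reading the file back reports the disabled index or "FREE".
 */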

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (cpu_has_topoext)
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
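
/*
 * For example, a CPU that enumerates L1d, L1i, L2 and L3 leaves before
 * the terminating CACHE_TYPE_NULL entry makes the loop above return 4.
 */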

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (cpu_has_topoext) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}
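
/*
 * Bits 15:12 of EDX from CPUID 0x80000006 encode the L3 associativity;
 * a nonzero field means an L3 is present, hence a fourth cache leaf
 * (L1d, L1i, L2, L3) instead of three.
 */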

unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;

			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			default:
				break;
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * the trace cache.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

#ifdef CONFIG_X86_HT
	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4, which in
	 * turn means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
	 * c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
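
/*
 * Note on the l2_id/l3_id computation above: get_count_order() is the
 * log2 of num_threads_sharing rounded up to a power of two, and masking
 * off that many low APIC-ID bits gives every thread sharing the cache
 * the same id, which then feeds cpu_llc_id.
 */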

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int i, sibling;

	if (cpu_has_topoext) {
		unsigned int apicid, nshared, first, last;

		if (!per_cpu(ici_cpuid4_info, cpu))
			return 0;

		this_leaf = CPUID4_INFO_IDX(cpu, index);
		nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}
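
/*
 * With topology extensions, threads sharing a cache occupy a contiguous
 * APIC-ID window of nshared IDs starting at a multiple of nshared;
 * first/last above bound that window, e.g. nshared = 2 and apicid = 5
 * give the window 4..5.
 */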

static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (cache_shared_amd_cpu_map_setup(cpu, index))
			return;
	}

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

		*retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
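
/*
 * For example, with CPUs 0-3 sharing the cache, shared_cpu_map prints a
 * hex mask such as "f" while shared_cpu_list prints the range "0-3".
 */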

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	switch (this_leaf->base.eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#ifdef CONFIG_AMD_NB
static struct attribute **amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	attrs = kzalloc(n * sizeof(struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, this_leaf->cpu) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count, this_leaf->cpu) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

1da177e4 1145/* Add/Remove cache interface for CPU device */
148f9bb8 1146static int cache_add_dev(struct device *dev)
1da177e4 1147{
8a25a2fd 1148 unsigned int cpu = dev->id;
1da177e4
LT
1149 unsigned long i, j;
1150 struct _index_kobject *this_object;
897de50e 1151 struct _cpuid4_info *this_leaf;
ef1d7151 1152 int retval;
1da177e4
LT
1153
1154 retval = cpuid4_cache_sysfs_init(cpu);
1155 if (unlikely(retval < 0))
1156 return retval;
1157
0fe1e009 1158 retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
6b6309b4 1159 &ktype_percpu_entry,
8a25a2fd 1160 &dev->kobj, "%s", "cache");
ef1d7151
AM
1161 if (retval < 0) {
1162 cpuid4_cache_sysfs_exit(cpu);
1163 return retval;
1164 }
1da177e4
LT
1165
1166 for (i = 0; i < num_cache_leaves; i++) {
8bdbd962 1167 this_object = INDEX_KOBJECT_PTR(cpu, i);
1da177e4
LT
1168 this_object->cpu = cpu;
1169 this_object->index = i;
897de50e
BP
1170
1171 this_leaf = CPUID4_INFO_IDX(cpu, i);
1172
f658bcfb
HR
1173 ktype_cache.default_attrs = default_attrs;
1174#ifdef CONFIG_AMD_NB
d2946041 1175 if (this_leaf->base.nb)
f658bcfb
HR
1176 ktype_cache.default_attrs = amd_l3_attrs();
1177#endif
5b3f355d 1178 retval = kobject_init_and_add(&(this_object->kobj),
6b6309b4 1179 &ktype_cache,
0fe1e009 1180 per_cpu(ici_cache_kobject, cpu),
5b3f355d 1181 "index%1lu", i);
1da177e4 1182 if (unlikely(retval)) {
8bdbd962
AC
1183 for (j = 0; j < i; j++)
1184 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
0fe1e009 1185 kobject_put(per_cpu(ici_cache_kobject, cpu));
1da177e4 1186 cpuid4_cache_sysfs_exit(cpu);
8b2b9c1a 1187 return retval;
1da177e4 1188 }
5b3f355d 1189 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
1da177e4 1190 }
f9b90566 1191 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
ef1d7151 1192
0fe1e009 1193 kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
8b2b9c1a 1194 return 0;
1da177e4
LT
1195}
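
/*
 * The resulting sysfs layout is one "cache" directory per CPU with an
 * indexN subdirectory per cache leaf, e.g.:
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 */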

static void cache_remove_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __init cache_sysfs_init(void)
{
	int i, err = 0;

	if (num_cache_leaves == 0)
		return 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(i) {
		struct device *dev = get_cpu_device(i);

		err = cache_add_dev(dev);
		if (err)
			goto out;
	}
	__register_hotcpu_notifier(&cacheinfo_cpu_notifier);

out:
	cpu_notifier_register_done();
	return err;
}

device_initcall(cache_sysfs_init);

#endif