/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

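/*
 * Typical consumer pattern, sketched here for illustration only (the caller
 * and do_something() are hypothetical, not part of this file): fetch the
 * per-CPU descriptor and walk its leaves.
 *
 *	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *	unsigned int i;
 *
 *	for (i = 0; i < this_cpu_ci->num_leaves; i++)
 *		do_something(this_cpu_ci->info_list + i);
 */
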
#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

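/*
 * Illustrative device tree node (hypothetical values) carrying the unified
 * cache properties queried via the table above; a CPU node's
 * "next-level-cache" phandle is what of_find_next_cache_node() follows to
 * reach such a node.
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-level = <2>;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <1024>;
 *	};
 */
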
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(np, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(np, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(np, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

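/*
 * Worked example with hypothetical values: a 32 KiB cache with 64-byte
 * lines and 128 sets yields (32768 / 128) / 64 = 4 ways. nr_sets == 1
 * denotes a fully associative cache, in which case ways_of_associativity
 * is deliberately left at 0.
 */
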
static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

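/*
 * Note: the loop above fills shared_cpu_map symmetrically (a CPU coming
 * online adds itself to each sharing sibling's mask and each sibling to its
 * own mask), so the sysfs shared_cpu_map and shared_cpu_list files stay
 * consistent regardless of the order in which CPUs come online.
 */
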
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely set up the cache leaves and
	 * shared_cpu_map, or it may leave them only partially set up.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

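/*
 * Summary of the hotplug online path defined later in this file:
 * cacheinfo_cpu_online() calls detect_cache_attributes() to build the
 * per-CPU info_list above, then cache_add_dev() to expose it under
 * /sys/devices/system/cpu/cpuX/cache/.
 */
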
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

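/*
 * For instance, the show_one(level, level) invocation below expands to a
 * level_show() callback that prints this_leaf->level; wired up through
 * DEVICE_ATTR_RO() further down, it backs the read-only "level" file in
 * each indexY directory.
 */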
show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);
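
/*
 * Resulting sysfs layout (illustrative, attribute values are hypothetical):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/type              "Data"
 *	/sys/devices/system/cpu/cpu0/cache/index0/level             "1"
 *	/sys/devices/system/cpu/cpu0/cache/index0/size               "32K"
 *	/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list   "0-3"
 *
 * One indexY directory is created per detected leaf; attributes whose value
 * could not be determined are hidden by cache_default_attrs_is_visible().
 */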