/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)

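/* return the (possibly not yet populated) cacheinfo descriptor for @cpu */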
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
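/*
 * Associate each cache leaf of @cpu with its device tree node: level 1
 * leaves map to the CPU node itself, higher levels follow the
 * next-level-cache chain via of_find_next_cache_node().
 */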
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

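/* with DT, two leaves share a cache iff they refer to the same cache node */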
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume a unique level 1 cache per CPU and
	 * system-wide shared caches for all other levels. This is used
	 * only if the arch specific code has not populated shared_cpu_map.
	 */
	return !(this_leaf->level == 1);
}
#endif

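/*
 * Build shared_cpu_map for every leaf of @cpu by comparing its leaves
 * against those of all other online CPUs; each pair of CPUs found to
 * share a cache ends up in each other's mask.
 */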
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

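/*
 * Undo cache_shared_cpu_map_setup() for @cpu: drop it from every
 * sibling's mask and release the of_node references taken above.
 */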
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

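/*
 * Weak fallbacks: architectures override these to report how many cache
 * levels/leaves exist and to fill in each leaf's attributes.
 */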
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

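/*
 * Allocate the info_list for @cpu, have the architecture fill in the
 * leaves, then derive the sharing information; any failure unwinds the
 * allocation.
 */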
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

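/*
 * sysfs glue: each online CPU gets a "cache" device under its cpuX
 * device, with one "indexY" child device per cache leaf, e.g.
 * /sys/devices/system/cpu/cpu0/cache/index1/size.
 */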
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])

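/* generate a sysfs show() routine printing one unsigned field of a leaf */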
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

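/*
 * Hide any attribute whose backing field was left zero/empty by the
 * architecture, so userspace only sees properties that were detected.
 */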
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

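/*
 * If the architecture supplies a private attribute group, splice it in
 * after the default group; the placeholder slot is filled once and
 * then reused.
 */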
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

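/* create the cpuX/cache parent device and the per-leaf device array */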
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

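/*
 * Register one indexY device per cache leaf of @cpu, skipping leaves
 * flagged with disable_sysfs; on any failure the whole hierarchy for
 * the CPU is torn down again.
 */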
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;
	cpumask_clear_cpu(cpu, &cache_dev_map);

	cpu_cache_sysfs_exit(cpu);
}

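/*
 * Hotplug notifier: (re)build cacheinfo and its sysfs nodes when a CPU
 * comes online, and tear both down again when it dies.
 */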
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo for cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);