drivers/base/cacheinfo.c
drivers: base: cacheinfo: support DT overrides for cache properties

/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

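/*
 * Example usage (a hypothetical caller, not part of this file): once
 * detect_cache_attributes() has run for a CPU, other kernel code can walk
 * its leaves through this accessor:
 *
 *	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(0);
 *	unsigned int i;
 *
 *	for (i = 0; i < ci->num_leaves; i++)
 *		pr_info("L%u cache, line size %u\n",
 *			ci->info_list[i].level,
 *			ci->info_list[i].coherency_line_size);
 */
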
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;

        /* skip if of_node is already populated */
        if (this_cpu_ci->info_list->of_node)
                return 0;

        if (!cpu_dev) {
                pr_err("No cpu device for CPU %d\n", cpu);
                return -ENODEV;
        }
        np = cpu_dev->of_node;
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np); /* cpu node itself */
                if (!np)
                        break;
                this_leaf->of_node = np;
                index++;
        }

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        return sib_leaf->of_node == this_leaf->of_node;
}

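/*
 * Illustrative device tree fragment (an assumed example, not taken from
 * this file): two CPUs whose "next-level-cache" phandles point at the
 * same l2-cache node. of_find_next_cache_node() follows those phandles,
 * so both CPUs' L2 leaves end up holding the same of_node and
 * cache_leaves_are_shared() reports them as shared:
 *
 *	cpu@0 {
 *		next-level-cache = <&L2>;
 *	};
 *	cpu@1 {
 *		next-level-cache = <&L2>;
 *	};
 *	L2: l2-cache {
 *		compatible = "cache";
 *	};
 */
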
/* OF properties to query for a given cache type */
struct cache_type_info {
        const char *size_prop;
        const char *line_size_props[2];
        const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
        {
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        }, {
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        }, {
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};

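/*
 * Illustrative cache node (an assumed example): the properties below are
 * the ones named in cache_type_info[] for a unified cache. A Harvard
 * level-1 cache would instead carry the "i-cache-*" and "d-cache-*"
 * variants on the cpu node itself:
 *
 *	L2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;		// 512 KiB
 *		cache-line-size = <64>;
 *		cache-sets = <512>;
 *	};
 */
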
static inline int get_cacheinfo_idx(enum cache_type type)
{
        if (type == CACHE_TYPE_UNIFIED)
                return 0;
        return type;
}

static void cache_size(struct cacheinfo *this_leaf)
{
        const char *propname;
        const __be32 *cache_size;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;

        cache_size = of_get_property(this_leaf->of_node, propname, NULL);
        if (cache_size)
                this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
        const __be32 *line_size;
        int i, lim, ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[ct_idx].line_size_props[i];
                line_size = of_get_property(this_leaf->of_node, propname, NULL);
                if (line_size)
                        break;
        }

        if (line_size)
                this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf)
{
        const char *propname;
        const __be32 *nr_sets;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;

        nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
        if (nr_sets)
                this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

static void cache_associativity(struct cacheinfo *this_leaf)
{
        unsigned int line_size = this_leaf->coherency_line_size;
        unsigned int nr_sets = this_leaf->number_of_sets;
        unsigned int size = this_leaf->size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (nr_sets != 1 && (nr_sets > 0 && size > 0 && line_size > 0))
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

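/*
 * Worked example: for a 32 KiB cache with 64-byte lines and 128 sets,
 * ways = (32768 / 128) / 64 = 4, i.e. a 4-way set-associative cache.
 * A "cache-sets" value of 1 denotes a fully associative cache, in which
 * case ways_of_associativity is deliberately left at 0 (the sysfs
 * visibility code below treats 0 as "fully associative").
 */
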
static void cache_of_override_properties(unsigned int cpu)
{
        int index;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                cache_size(this_leaf);
                cache_get_line_size(this_leaf);
                cache_nr_sets(this_leaf);
                cache_associativity(this_leaf);
        }
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT systems, assume a unique level 1 cache and
         * system-wide shared caches for all other levels. This is used
         * only if arch-specific code has not populated shared_cpu_map.
         */
        return this_leaf->level != 1;
}
#endif

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret = 0;

        if (this_cpu_ci->cpu_map_populated)
                return 0;

        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
                /* No cache property/hierarchy support yet in ACPI */
                ret = -ENOTSUPP;
        if (ret)
                return ret;

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = this_cpu_ci->info_list + index;
                /* skip if shared_cpu_map is already populated */
                if (!cpumask_empty(&this_leaf->shared_cpu_map))
                        continue;

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */
                        sib_leaf = sib_cpu_ci->info_list + index;
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
        }

        return 0;
}

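/*
 * Illustrative result (an assumed 4-CPU system with per-CPU L1 caches and
 * a single L2 shared by all CPUs): after the loop above has run on every
 * CPU, each L1 leaf carries a shared_cpu_map containing only its own CPU,
 * while every L2 leaf carries shared_cpu_map = CPUs 0-3. These masks back
 * the shared_cpu_map/shared_cpu_list sysfs attributes defined below.
 */
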
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci;

                        if (sibling == cpu) /* skip itself */
                                continue;

                        sib_cpu_ci = get_cpu_cacheinfo(sibling);
                        if (!sib_cpu_ci->info_list)
                                continue;

                        sib_leaf = sib_cpu_ci->info_list + index;
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                of_node_put(this_leaf->of_node);
        }
}

static void cache_override_properties(unsigned int cpu)
{
        if (of_have_populated_dt())
                return cache_of_override_properties(cpu);
}

static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

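/*
 * Sketch of the contract for the two __weak hooks above (an assumed
 * example, not the implementation of any particular architecture):
 * init_cache_level() fills in the level/leaf counts so this file can size
 * its allocation, and populate_cache_leaves() fills one struct cacheinfo
 * per leaf:
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;
 *		this_cpu_ci->num_leaves = 3;	// L1I + L1D + unified L2
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;
 *
 *		leaf[0].level = 1; leaf[0].type = CACHE_TYPE_INST;
 *		leaf[1].level = 1; leaf[1].type = CACHE_TYPE_DATA;
 *		leaf[2].level = 2; leaf[2].type = CACHE_TYPE_UNIFIED;
 *		return 0;
 *	}
 */
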
static int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;

        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
         * For systems using DT for cache hierarchy, of_node and shared_cpu_map
         * will be set up here only if they are not populated already
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                goto free_ci;
        }

        cache_override_properties(cpu);
        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
                struct device_attribute *attr, char *buf)	\
{								\
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
        return sprintf(buf, "%u\n", this_leaf->object);		\
}

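/*
 * For instance, show_one(level, level) below expands to:
 *
 *	static ssize_t level_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *		return sprintf(buf, "%u\n", this_leaf->level);
 *	}
 *
 * which is the _show callback that DEVICE_ATTR_RO(level) wires up
 * further down.
 */
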
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

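/*
 * Example of the resulting sysfs layout (illustrative values):
 *
 *	$ cd /sys/devices/system/cpu/cpu0/cache/index0
 *	$ cat type level size coherency_line_size ways_of_associativity
 *	Data
 *	1
 *	32K
 *	64
 *	4
 */
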
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return -EINVAL;
        }
}

static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                n = sprintf(buf, "ReadWriteAllocate\n");
        else if (ci_attr & CACHE_READ_ALLOCATE)
                n = sprintf(buf, "ReadAllocate\n");
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                n = sprintf(buf, "WriteAllocate\n");
        return n;
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sprintf(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sprintf(buf, "WriteBack\n");
        return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* Place holder for private group */
        NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

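/*
 * Sketch of an architecture override for cache_get_priv_group() (an
 * assumed example; the attribute name and the arch_cache_has_ecc()
 * helper are made up for illustration). An arch can expose extra
 * attributes alongside the defaults by returning its own group:
 *
 *	static ssize_t ecc_enabled_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *
 *		return sprintf(buf, "%u\n", arch_cache_has_ecc(this_leaf));
 *	}
 *	static DEVICE_ATTR_RO(ecc_enabled);
 *
 *	static struct attribute *arch_cache_attrs[] = {
 *		&dev_attr_ecc_enabled.attr,
 *		NULL,
 *	};
 *	static const struct attribute_group arch_cache_group = {
 *		.attrs = arch_cache_attrs,
 *	};
 *
 *	const struct attribute_group *
 *	cache_get_priv_group(struct cacheinfo *this_leaf)
 *	{
 *		return &arch_cache_group;
 *	}
 */
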
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = this_cpu_ci->info_list + i;
                if (this_leaf->disable_sysfs)
                        continue;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
        if (!cpumask_test_cpu(cpu, &cache_dev_map))
                return;
        cpumask_clear_cpu(cpu, &cache_dev_map);

        cpu_cache_sysfs_exit(cpu);
}

static int cacheinfo_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        int rc = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                rc = detect_cache_attributes(cpu);
                if (!rc)
                        rc = cache_add_dev(cpu);
                break;
        case CPU_DEAD:
                cache_remove_dev(cpu);
                free_cache_attributes(cpu);
                break;
        }
        return notifier_from_errno(rc);
}

static int __init cacheinfo_sysfs_init(void)
{
        int cpu, rc = 0;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                rc = detect_cache_attributes(cpu);
                if (rc)
                        goto out;
                rc = cache_add_dev(cpu);
                if (rc) {
                        free_cache_attributes(cpu);
                        pr_err("error populating cacheinfo for cpu%d\n", cpu);
                        goto out;
                }
        }
        __hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
        cpu_notifier_register_done();
        return rc;
}

device_initcall(cacheinfo_sysfs_init);