/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

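/*
 * get_cpu_cacheinfo - return the cacheinfo descriptor for a given CPU.
 * The returned structure carries the number of detected cache leaves
 * (num_leaves) and the per-leaf info_list array that is allocated by
 * detect_cache_attributes() below.
 */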
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

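/*
 * When the cache hierarchy is described in the device tree, each leaf
 * is matched with a device_node: level 1 leaves use the CPU node
 * itself, while higher levels are reached by following the chain of
 * "next-level-cache" phandles via of_find_next_cache_node().
 */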
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume a unique level 1 cache and
	 * system-wide shared caches for all other levels. This is used
	 * only if arch-specific code has not populated shared_cpu_map.
	 */
	return !(this_leaf->level == 1);
}
#endif

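/*
 * Build a symmetric shared_cpu_map for every leaf: for each online
 * sibling whose leaf at the same index shares this cache, set this CPU
 * in the sibling's mask and the sibling in this CPU's mask.
 */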
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	ret = cache_setup_of_node(cpu);
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

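/*
 * Undo cache_shared_cpu_map_setup() for an outgoing CPU: clear it from
 * every sibling's shared_cpu_map and drop the of_node references taken
 * in cache_setup_of_node().
 */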
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

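/*
 * The two weak hooks above are meant to be overridden by architecture
 * code. As an illustrative sketch only (not part of this file; the
 * helper arch_count_cache_leaves() is hypothetical), an override could
 * look roughly like:
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 2;
 *		this_cpu_ci->num_leaves = arch_count_cache_leaves(cpu);
 *		return 0;
 *	}
 *
 * populate_cache_leaves() would then fill one struct cacheinfo (type,
 * level, size, geometry) per leaf in this_cpu_ci->info_list.
 */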
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
			cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

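/*
 * Each show_one() invocation below expands to a sysfs show handler;
 * e.g. show_one(level, level) defines level_show(), which prints the
 * leaf's level field as a decimal string.
 */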
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

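/*
 * Hide any attribute whose backing field was never populated by the
 * architecture: only attributes with a meaningful (non-zero/non-empty)
 * value are exposed in sysfs. ways_of_associativity is keyed on size
 * instead, so that a value of 0 can still mean "fully associative".
 */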
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Placeholder for private group */
	NULL,
};

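/*
 * Architectures may attach extra, arch-private attributes to each
 * index directory by overriding cache_get_priv_group(); the returned
 * group is slotted into the reserved NULL entry of
 * cache_private_groups[] above.
 */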
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;
	cpumask_clear_cpu(cpu, &cache_dev_map);

	cpu_cache_sysfs_exit(cpu);
}

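/*
 * CPU hotplug notifier: build cacheinfo and the sysfs hierarchy when a
 * CPU comes online, and tear both down again when it goes dead.
 */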
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

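/*
 * Populate cacheinfo for all CPUs already online, then register the
 * hotplug callback for CPUs that come and go later. Registration is
 * done under cpu_notifier_register_begin()/done() so no hotplug event
 * can slip in between the initial scan and the callback registration.
 */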
static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo for cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);