/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
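/*
 * Resolve the "cpu" phandle of a cpu-map node to a logical CPU number.
 * As a side effect this parses the node's capacity-dmips-mhz property
 * via topology_parse_cpu_capacity(). Returns -1 if the phandle is
 * missing or does not match any possible CPU.
 */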
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			topology_parse_cpu_capacity(cpu_node, cpu);
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %pOF\n", cpu_node);

	of_node_put(cpu_node);
	return -1;
}
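/*
 * Parse a coreN node from the cpu-map. A core either contains threadN
 * subnodes (SMT) or references a CPU directly; a node that mixes both
 * forms is rejected as malformed.
 */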
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%pOF: Can't get CPU for thread\n",
				       t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}
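/*
 * Parse a clusterN node, recursing into nested clusters. The depth is
 * tracked only to validate the hierarchy; the scheduler is handed a
 * flat list of packages, one per leaf cluster.
 *
 * An illustrative cpu-map fragment this parser accepts (node and cpu
 * labels are hypothetical):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *			core1 { cpu = <&cpu3>; };
 *		};
 *	};
 */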
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}
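/*
 * Walk the /cpus/cpu-map node if the DT provides one. On any parse
 * error the caller discards the whole result, so it is safe to bail
 * out early here.
 */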
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
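/*
 * cpu topology table
 */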
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
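/*
 * The scheduler uses this mask to build the MC domain. When LLC
 * information is available and the LLC siblings form a subset of the
 * package siblings, the LLC mask is the narrower, better grouping.
 */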
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = &cpu_topology[cpu].core_sibling;

	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_siblings, core_mask))
			core_mask = &cpu_topology[cpu].llc_siblings;
	}

	return core_mask;
}
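/*
 * Record the new CPU in the sibling masks of every CPU that shares its
 * LLC, package or core, and mirror those CPUs into the new CPU's own
 * masks so the relation stays symmetric.
 */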
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_siblings);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_siblings);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}
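/*
 * Fallback for CPUs not described by firmware: derive the topology from
 * the MPIDR_EL1 affinity fields. For example (values hypothetical), a
 * multi-threaded CPU with Aff2 = 0, Aff1 = 1, Aff0 = 2 is recorded as
 * package 0, core 1, thread 2; without the MT bit the fields shift down
 * one level and thread_id is set to -1.
 */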
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}
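/*
 * Return every entry to its default state: thread_id and package_id
 * invalid (-1), core_id 0, and each sibling mask containing only the
 * CPU itself.
 */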
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->package_id = -1;

		cpu_topo->llc_id = -1;
		cpumask_clear(&cpu_topo->llc_siblings);
		cpumask_set_cpu(cpu, &cpu_topo->llc_siblings);

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}
#ifdef CONFIG_ACPI
/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
static int __init parse_acpi_topology(void)
{
	bool is_threaded;
	int cpu, topology_id;

	is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (is_threaded) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * this is the only part of cpu_topology that has
			 * a direct relationship with the cache topology
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#else
static inline int __init parse_acpi_topology(void)
{
	return -EINVAL;
}
#endif
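/*
 * Boot-time entry point: start from a clean table, try ACPI (PPTT)
 * first and DT second, and discard everything on failure rather than
 * keeping a half-parsed topology.
 */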
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (!acpi_disabled && parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}