/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Released under the GPLv2 only.
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>

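/*
 * Per-CPU capacity scale, in units of SCHED_CAPACITY_SCALE; every CPU
 * starts at full capacity (1024) until DT/cpufreq information is
 * parsed. cpu_scale_mutex serializes writers.
 */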
static DEFINE_MUTEX(cpu_scale_mutex);
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

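/*
 * cpu_capacity sysfs attribute (/sys/devices/system/cpu/cpuX/cpu_capacity):
 * reads return the current capacity, writes apply a new capacity to the
 * CPU and all of its core siblings.
 */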
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
		       topology_get_cpu_scale(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id;
	int i;
	unsigned long new_capacity;
	ssize_t ret;

	if (!count)
		return 0;

	ret = kstrtoul(buf, 0, &new_capacity);
	if (ret)
		return ret;
	if (new_capacity > SCHED_CAPACITY_SCALE)
		return -EINVAL;

	mutex_lock(&cpu_scale_mutex);
	for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
		topology_set_cpu_scale(i, new_capacity);
	mutex_unlock(&cpu_scale_mutex);

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

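/*
 * Hook the attribute up for every possible CPU once the CPU devices
 * exist; get_cpu_device() still returning NULL means we ran too early.
 */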
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

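/*
 * Raw capacities as read from the device tree. capacity_scale tracks
 * the largest value seen so far (the CPU that will normalize to 1024);
 * cap_parsing_failed latches any parse or allocation error so partial
 * information is never applied.
 */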
static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;

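/*
 * Normalize the raw capacities so the biggest CPU ends up with
 * SCHED_CAPACITY_SCALE (1024):
 *
 *	cpu_scale = (raw_capacity << SCHED_CAPACITY_SHIFT) / capacity_scale
 */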
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}

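/*
 * Parse the capacity-dmips-mhz property of a CPU node, e.g. (values
 * here are illustrative, not taken from any particular board file):
 *
 *	cpu@0 {
 *		capacity-dmips-mhz = <578>;
 *	};
 *
 * Returns 1 when a raw capacity was read, 0 on a missing property or
 * allocation failure (in which case all CPUs fall back to 1024).
 */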
int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret = 1;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return !ret;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return 0;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			 cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
			       cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static bool cap_parsing_done __initdata;
static cpumask_var_t cpus_to_visit __initdata;
static void __init parsing_done_workfn(struct work_struct *work);
static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);

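/*
 * capacity-dmips-mhz is normalized per MHz of cpu capacity, so once
 * cpufreq reports the maximum frequency of each policy the per-CPU
 * values are re-scaled:
 *
 *	raw_capacity[cpu] = cpu_scale * (max_freq [kHz] / 1000)
 *
 * Each CPUFREQ_NOTIFY clears the policy's related_cpus from
 * cpus_to_visit; once the mask is empty every CPU has been seen and
 * the final normalization runs.
 */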
static int __init
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
			 cpumask_pr_args(policy->related_cpus),
			 cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			topology_normalize_cpu_scale();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier __initdata = {
	.notifier_call = init_cpu_capacity_callback,
};

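/*
 * Registered at core_initcall so the notifier is in place before any
 * cpufreq driver creates its policies; without DT capacities (or on
 * ACPI systems) the defaults are kept and nothing is registered.
 */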
static int __init register_cpufreq_notifier(void)
{
	/*
	 * on ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

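/*
 * A notifier cannot safely unregister itself from inside its own
 * callback, so init_cpu_capacity_callback() defers the unregistration
 * to this one-shot work item.
 */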
static void __init parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	/* the notifier will not fire again; release the init-time mask */
	free_cpumask_var(cpus_to_visit);
}

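/*
 * Without CPU_FREQ there is no second pass over raw_capacity, so it is
 * simply freed once DT-based normalization has run.
 */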
#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);

	return 0;
}
core_initcall(free_raw_capacity);
#endif