/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "opp.h"

33692dc3 | 25 | #ifdef CONFIG_CPU_FREQ |
f59d3ee8 | 26 | |
a0dd7b79 NM |
27 | /** |
28 | * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device | |
29 | * @dev: device for which we do this operation | |
30 | * @table: Cpufreq table returned back to caller | |
31 | * | |
32 | * Generate a cpufreq table for a provided device- this assumes that the | |
2c2709dc | 33 | * opp table is already initialized and ready for usage. |
a0dd7b79 NM |
34 | * |
35 | * This function allocates required memory for the cpufreq table. It is | |
36 | * expected that the caller does the required maintenance such as freeing | |
37 | * the table as required. | |
38 | * | |
39 | * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM | |
40 | * if no memory available for the operation (table is not populated), returns 0 | |
41 | * if successful and table is populated. | |
42 | * | |
43 | * WARNING: It is important for the callers to ensure refreshing their copy of | |
44 | * the table if any of the mentioned functions have been invoked in the interim. | |
45 | * | |
2c2709dc | 46 | * Locking: The internal opp_table and opp structures are RCU protected. |
a0dd7b79 NM |
47 | * Since we just use the regular accessor functions to access the internal data |
48 | * structures, we use RCU read lock inside this function. As a result, users of | |
49 | * this function DONOT need to use explicit locks for invoking. | |
50 | */ | |
51 | int dev_pm_opp_init_cpufreq_table(struct device *dev, | |
52 | struct cpufreq_frequency_table **table) | |
53 | { | |
54 | struct dev_pm_opp *opp; | |
55 | struct cpufreq_frequency_table *freq_table = NULL; | |
56 | int i, max_opps, ret = 0; | |
57 | unsigned long rate; | |
58 | ||
59 | rcu_read_lock(); | |
60 | ||
61 | max_opps = dev_pm_opp_get_opp_count(dev); | |
62 | if (max_opps <= 0) { | |
63 | ret = max_opps ? max_opps : -ENODATA; | |
64 | goto out; | |
65 | } | |
66 | ||
d3599920 | 67 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); |
a0dd7b79 NM |
68 | if (!freq_table) { |
69 | ret = -ENOMEM; | |
70 | goto out; | |
71 | } | |
72 | ||
73 | for (i = 0, rate = 0; i < max_opps; i++, rate++) { | |
74 | /* find next rate */ | |
75 | opp = dev_pm_opp_find_freq_ceil(dev, &rate); | |
76 | if (IS_ERR(opp)) { | |
77 | ret = PTR_ERR(opp); | |
78 | goto out; | |
79 | } | |
80 | freq_table[i].driver_data = i; | |
81 | freq_table[i].frequency = rate / 1000; | |
79eea44a BZ |
82 | |
83 | /* Is Boost/turbo opp ? */ | |
84 | if (dev_pm_opp_is_turbo(opp)) | |
85 | freq_table[i].flags = CPUFREQ_BOOST_FREQ; | |
a0dd7b79 NM |
86 | } |
87 | ||
88 | freq_table[i].driver_data = i; | |
89 | freq_table[i].frequency = CPUFREQ_TABLE_END; | |
90 | ||
91 | *table = &freq_table[0]; | |
92 | ||
93 | out: | |
94 | rcu_read_unlock(); | |
95 | if (ret) | |
96 | kfree(freq_table); | |
97 | ||
98 | return ret; | |
99 | } | |
100 | EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); | |
101 | ||
102 | /** | |
103 | * dev_pm_opp_free_cpufreq_table() - free the cpufreq table | |
104 | * @dev: device for which we do this operation | |
105 | * @table: table to free | |
106 | * | |
107 | * Free up the table allocated by dev_pm_opp_init_cpufreq_table | |
108 | */ | |
109 | void dev_pm_opp_free_cpufreq_table(struct device *dev, | |
110 | struct cpufreq_frequency_table **table) | |
111 | { | |
112 | if (!table) | |
113 | return; | |
114 | ||
115 | kfree(*table); | |
116 | *table = NULL; | |
117 | } | |
118 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); | |
33692dc3 | 119 | #endif /* CONFIG_CPU_FREQ */ |
f59d3ee8 | 120 | |
f47b72a1 | 121 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of) |
f59d3ee8 VK |
122 | { |
123 | struct device *cpu_dev; | |
124 | int cpu; | |
125 | ||
126 | WARN_ON(cpumask_empty(cpumask)); | |
127 | ||
128 | for_each_cpu(cpu, cpumask) { | |
129 | cpu_dev = get_cpu_device(cpu); | |
130 | if (!cpu_dev) { | |
131 | pr_err("%s: failed to get cpu%d device\n", __func__, | |
132 | cpu); | |
133 | continue; | |
134 | } | |
135 | ||
411466c5 SH |
136 | if (of) |
137 | dev_pm_opp_of_remove_table(cpu_dev); | |
138 | else | |
139 | dev_pm_opp_remove_table(cpu_dev); | |
f59d3ee8 VK |
140 | } |
141 | } | |
411466c5 SH |
142 | |
143 | /** | |
144 | * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask | |
145 | * @cpumask: cpumask for which OPP table needs to be removed | |
146 | * | |
147 | * This removes the OPP tables for CPUs present in the @cpumask. | |
148 | * This should be used to remove all the OPPs entries associated with | |
149 | * the cpus in @cpumask. | |
150 | * | |
151 | * Locking: The internal opp_table and opp structures are RCU protected. | |
152 | * Hence this function internally uses RCU updater strategy with mutex locks | |
153 | * to keep the integrity of the internal data structures. Callers should ensure | |
154 | * that this function is *NOT* called under RCU protection or in contexts where | |
155 | * mutex cannot be locked. | |
156 | */ | |
157 | void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) | |
158 | { | |
159 | _dev_pm_opp_cpumask_remove_table(cpumask, false); | |
160 | } | |
161 | EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); | |
162 | ||
2c93104f VK |
163 | /** |
164 | * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs | |
165 | * @cpu_dev: CPU device for which we do this operation | |
166 | * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev | |
167 | * | |
168 | * This marks OPP table of the @cpu_dev as shared by the CPUs present in | |
169 | * @cpumask. | |
170 | * | |
171 | * Returns -ENODEV if OPP table isn't already present. | |
172 | * | |
173 | * Locking: The internal opp_table and opp structures are RCU protected. | |
174 | * Hence this function internally uses RCU updater strategy with mutex locks | |
175 | * to keep the integrity of the internal data structures. Callers should ensure | |
176 | * that this function is *NOT* called under RCU protection or in contexts where | |
177 | * mutex cannot be locked. | |
178 | */ | |
dde370b2 | 179 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, |
ddbb74bc | 180 | const struct cpumask *cpumask) |
2c93104f VK |
181 | { |
182 | struct opp_device *opp_dev; | |
183 | struct opp_table *opp_table; | |
184 | struct device *dev; | |
185 | int cpu, ret = 0; | |
186 | ||
187 | mutex_lock(&opp_table_lock); | |
188 | ||
189 | opp_table = _find_opp_table(cpu_dev); | |
190 | if (IS_ERR(opp_table)) { | |
191 | ret = PTR_ERR(opp_table); | |
192 | goto unlock; | |
193 | } | |
194 | ||
195 | for_each_cpu(cpu, cpumask) { | |
196 | if (cpu == cpu_dev->id) | |
197 | continue; | |
198 | ||
199 | dev = get_cpu_device(cpu); | |
200 | if (!dev) { | |
201 | dev_err(cpu_dev, "%s: failed to get cpu%d device\n", | |
202 | __func__, cpu); | |
203 | continue; | |
204 | } | |
205 | ||
206 | opp_dev = _add_opp_dev(dev, opp_table); | |
207 | if (!opp_dev) { | |
208 | dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n", | |
209 | __func__, cpu); | |
210 | continue; | |
211 | } | |
46e7a4e1 VK |
212 | |
213 | /* Mark opp-table as multiple CPUs are sharing it now */ | |
214 | opp_table->shared_opp = true; | |
2c93104f VK |
215 | } |
216 | unlock: | |
217 | mutex_unlock(&opp_table_lock); | |
218 | ||
219 | return ret; | |
220 | } | |
221 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); | |
6f707daa VK |
222 | |
223 | /** | |
224 | * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev | |
225 | * @cpu_dev: CPU device for which we do this operation | |
226 | * @cpumask: cpumask to update with information of sharing CPUs | |
227 | * | |
228 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | |
229 | * | |
230 | * Returns -ENODEV if OPP table isn't already present. | |
231 | * | |
232 | * Locking: The internal opp_table and opp structures are RCU protected. | |
233 | * Hence this function internally uses RCU updater strategy with mutex locks | |
234 | * to keep the integrity of the internal data structures. Callers should ensure | |
235 | * that this function is *NOT* called under RCU protection or in contexts where | |
236 | * mutex cannot be locked. | |
237 | */ | |
ddbb74bc | 238 | int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) |
6f707daa VK |
239 | { |
240 | struct opp_device *opp_dev; | |
241 | struct opp_table *opp_table; | |
242 | int ret = 0; | |
243 | ||
244 | mutex_lock(&opp_table_lock); | |
245 | ||
246 | opp_table = _find_opp_table(cpu_dev); | |
247 | if (IS_ERR(opp_table)) { | |
248 | ret = PTR_ERR(opp_table); | |
249 | goto unlock; | |
250 | } | |
251 | ||
252 | cpumask_clear(cpumask); | |
253 | ||
254 | if (opp_table->shared_opp) { | |
255 | list_for_each_entry(opp_dev, &opp_table->dev_list, node) | |
256 | cpumask_set_cpu(opp_dev->dev->id, cpumask); | |
257 | } else { | |
258 | cpumask_set_cpu(cpu_dev->id, cpumask); | |
259 | } | |
260 | ||
261 | unlock: | |
262 | mutex_unlock(&opp_table_lock); | |
263 | ||
264 | return ret; | |
265 | } | |
266 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus); |