/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Register definitions */
#define XMU_DVFS_CTRL			0x0060
#define XMU_PMU_P0_7			0x0064
#define XMU_C0_3_PSTATE			0x0090
#define XMU_P_LIMIT			0x00a0
#define XMU_P_STATUS			0x00a4
#define XMU_PMUEVTEN			0x00d0
#define XMU_PMUIRQEN			0x00d4
#define XMU_PMUIRQ			0x00d8

/* PMU mask and shift definitions */
#define P_VALUE_MASK			0x7

#define XMU_DVFS_CTRL_EN_SHIFT		0

#define P0_7_CPUCLKDEV_SHIFT		21
#define P0_7_CPUCLKDEV_MASK		0x7
#define P0_7_ATBCLKDEV_SHIFT		18
#define P0_7_ATBCLKDEV_MASK		0x7
#define P0_7_CSCLKDEV_SHIFT		15
#define P0_7_CSCLKDEV_MASK		0x7
#define P0_7_CPUEMA_SHIFT		28
#define P0_7_CPUEMA_MASK		0xf
#define P0_7_L2EMA_SHIFT		24
#define P0_7_L2EMA_MASK			0xf
#define P0_7_VDD_SHIFT			8
#define P0_7_VDD_MASK			0x7f
#define P0_7_FREQ_SHIFT			0
#define P0_7_FREQ_MASK			0xff

#define C0_3_PSTATE_VALID_SHIFT		8
#define C0_3_PSTATE_CURR_SHIFT		4
#define C0_3_PSTATE_NEW_SHIFT		0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT		0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX		500
#define CPU_DBG_FREQ_MAX		375
#define CPU_ATB_FREQ_MAX		500

#define PMIC_LOW_VOLT			0x30
#define PMIC_HIGH_VOLT			0x28

#define CPUEMA_HIGH			0x2
#define CPUEMA_MID			0x4
#define CPUEMA_LOW			0x7
#define L2EMA_HIGH			0x1
#define L2EMA_MID			0x3
#define L2EMA_LOW			0x4

/* frequency unit is 20MHZ */
#define FREQ_UNIT			20
#define MAX_VOLTAGE			1550000		/* In microvolt */
#define VOLTAGE_STEP			12500		/* In microvolt */
#define CPUFREQ_NAME			"exynos5440_dvfs"
#define DEF_TRANS_LATENCY		100000
enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END	(L7 + 1)
struct exynos_dvfs_data {
	void __iomem *base;
	struct resource *mem;
	int irq;
	struct clk *cpu_clk;
	unsigned int latency;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_count;
	struct device *dev;
	bool dvfs_enabled;
	struct work_struct irq_work;
};
static struct exynos_dvfs_data *dvfs_info;
static DEFINE_MUTEX(cpufreq_lock);
static struct cpufreq_freqs freqs;
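/*
 * Program one XMU_PMU_P0_7 entry per cpufreq table index: each entry packs
 * the CPU/ATB/CS clock dividers, the EMA settings and a PMIC voltage ID
 * derived from the OPP voltage, plus the frequency in FREQ_UNIT steps.
 */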
static int init_div_table(void)
{
	struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
	unsigned int tmp, clk_div, ema_div, freq, volt_id;
	struct dev_pm_opp *opp;

	rcu_read_lock();
	cpufreq_for_each_entry(pos, freq_tbl) {
		opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
					pos->frequency * 1000, true);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(dvfs_info->dev,
				"failed to find valid OPP for %u KHZ\n",
				pos->frequency);
			return PTR_ERR(opp);
		}

		freq = pos->frequency / 1000; /* In MHZ */
		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
					<< P0_7_CPUCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
					<< P0_7_ATBCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
					<< P0_7_CSCLKDEV_SHIFT;

		/* Calculate EMA */
		volt_id = dev_pm_opp_get_voltage(opp);
		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
		if (volt_id < PMIC_HIGH_VOLT) {
			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
		} else if (volt_id > PMIC_LOW_VOLT) {
			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
		} else {
			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
				(L2EMA_MID << P0_7_L2EMA_SHIFT);
		}

		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 +
						4 * (pos - freq_tbl));
	}

	rcu_read_unlock();
	return 0;
}
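/*
 * Enable hardware-driven DVFS: unmask the P-state change event and IRQ,
 * seed every core's C0_3_PSTATE register with the index matching the boot
 * frequency (falling back to the fastest table entry), then set the enable
 * bit in XMU_DVFS_CTRL.
 */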
static void exynos_enable_dvfs(unsigned int cur_frequency)
{
	unsigned int tmp, cpu;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
	struct cpufreq_frequency_table *pos;

	/* Disable DVFS */
	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

	/* Enable PSTATE Change Event */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

	/* Enable PSTATE Change IRQ */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

	/* Set initial performance index */
	cpufreq_for_each_entry(pos, freq_table)
		if (pos->frequency == cur_frequency)
			break;

	if (pos->frequency == CPUFREQ_TABLE_END) {
		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
		/* Assign the highest frequency */
		pos = freq_table;
		cur_frequency = pos->frequency;
	}

	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
						cur_frequency);

	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
	}

	/* Enable DVFS */
	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
				dvfs_info->base + XMU_DVFS_CTRL);
}
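/*
 * cpufreq ->target_index callback: request the new P-state by writing the
 * table index into each CPU's C0_3_PSTATE register. The transition is only
 * started here; it is completed from the PMU interrupt path in
 * exynos_cpufreq_work() (hence CPUFREQ_ASYNC_NOTIFICATION below).
 */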
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int tmp;
	int i;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	mutex_lock(&cpufreq_lock);

	freqs.old = policy->cur;
	freqs.new = freq_table[index].frequency;

	cpufreq_freq_transition_begin(policy, &freqs);

	/* Set the target frequency in all C0_3_PSTATE registers */
	for_each_cpu(i, policy->cpus) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= (index << C0_3_PSTATE_NEW_SHIFT);

		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
	}
	mutex_unlock(&cpufreq_lock);

	return 0;
}
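/*
 * Deferred half of the P-state change interrupt: read back the P-state the
 * hardware actually selected from XMU_P_STATUS, complete the cpufreq
 * transition and re-enable the IRQ that was masked in the hard handler.
 */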
static void exynos_cpufreq_work(struct work_struct *work)
{
	unsigned int cur_pstate, index;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	/* Ensure we can access cpufreq structures */
	if (unlikely(dvfs_info->dvfs_enabled == false))
		goto skip_work;

	mutex_lock(&cpufreq_lock);
	freqs.old = policy->cur;

	cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
	if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
		index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
	else
		index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;

	if (likely(index < dvfs_info->freq_count)) {
		freqs.new = freq_table[index].frequency;
	} else {
		dev_crit(dvfs_info->dev, "New frequency out of range\n");
		freqs.new = freqs.old;
	}
	cpufreq_freq_transition_end(policy, &freqs, 0);

	cpufreq_cpu_put(policy);
	mutex_unlock(&cpufreq_lock);
skip_work:
	enable_irq(dvfs_info->irq);
}
static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
{
	unsigned int tmp;

	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
	if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
		__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
		disable_irq_nosync(irq);
		schedule_work(&dvfs_info->irq_work);
	}

	return IRQ_HANDLED;
}
static void exynos_sort_descend_freq_table(void)
{
	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
	int i = 0, index;
	unsigned int tmp_freq;
	/*
	 * Exynos5440 clock controller state logic expects the cpufreq table to
	 * be in descending order. But the OPP library constructs the table in
	 * ascending order. So to make the table descending we just need to
	 * swap element i with element N - i - 1.
	 */
	for (i = 0; i < dvfs_info->freq_count / 2; i++) {
		index = dvfs_info->freq_count - i - 1;
		tmp_freq = freq_tbl[i].frequency;
		freq_tbl[i].frequency = freq_tbl[index].frequency;
		freq_tbl[index].frequency = tmp_freq;
	}
}
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	policy->clk = dvfs_info->cpu_clk;
	return cpufreq_generic_init(policy, dvfs_info->freq_table,
			dvfs_info->latency);
}
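/*
 * CPUFREQ_ASYNC_NOTIFICATION: the transition notifications are issued by
 * this driver itself (begin in exynos_target(), end in exynos_cpufreq_work())
 * instead of by the cpufreq core around ->target_index().
 */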
static struct cpufreq_driver exynos_driver = {
	.flags		= CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
				CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= exynos_target,
	.get		= cpufreq_generic_get,
	.init		= exynos_cpufreq_cpu_init,
	.name		= CPUFREQ_NAME,
	.attr		= cpufreq_generic_attr,
};
static const struct of_device_id exynos_cpufreq_match[] = {
	{
		.compatible = "samsung,exynos5440-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
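/*
 * Probe: map the XMU register block, build the cpufreq table from the OPP
 * entries in the device tree (sorted into descending order), program the
 * divider table, enable hardware DVFS and register with the cpufreq core.
 */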
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct device_node *np;
	struct resource res;
	unsigned int cur_frequency;

	np = pdev->dev.of_node;
	if (!np)
		return -ENODEV;

	dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
	if (!dvfs_info) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	dvfs_info->dev = &pdev->dev;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto err_put_node;

	dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
	if (IS_ERR(dvfs_info->base)) {
		ret = PTR_ERR(dvfs_info->base);
		goto err_put_node;
	}

	dvfs_info->irq = irq_of_parse_and_map(np, 0);
	if (!dvfs_info->irq) {
		dev_err(dvfs_info->dev, "No cpufreq irq found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = dev_pm_opp_of_add_table(dvfs_info->dev);
	if (ret) {
		dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
		goto err_put_node;
	}

	ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
					    &dvfs_info->freq_table);
	if (ret) {
		dev_err(dvfs_info->dev,
			"failed to init cpufreq table: %d\n", ret);
		goto err_free_opp;
	}
	dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
	exynos_sort_descend_freq_table();

	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
		dvfs_info->latency = DEF_TRANS_LATENCY;

	dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
	if (IS_ERR(dvfs_info->cpu_clk)) {
		dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
		ret = PTR_ERR(dvfs_info->cpu_clk);
		goto err_free_table;
	}

	cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
	if (!cur_frequency) {
		dev_err(dvfs_info->dev, "Failed to get clock rate\n");
		ret = -EINVAL;
		goto err_free_table;
	}
	cur_frequency /= 1000;

	INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
	ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
				exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
				CPUFREQ_NAME, dvfs_info);
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to register IRQ\n");
		goto err_free_table;
	}

	ret = init_div_table();
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to initialise div table\n");
		goto err_free_table;
	}

	exynos_enable_dvfs(cur_frequency);
	ret = cpufreq_register_driver(&exynos_driver);
	if (ret) {
		dev_err(dvfs_info->dev,
			"%s: failed to register cpufreq driver\n", __func__);
		goto err_free_table;
	}

	of_node_put(np);
	dvfs_info->dvfs_enabled = true;
	return 0;

err_free_table:
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_free_opp:
	dev_pm_opp_of_remove_table(dvfs_info->dev);
err_put_node:
	of_node_put(np);
	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
	return ret;
}
static int exynos_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&exynos_driver);
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
	dev_pm_opp_of_remove_table(dvfs_info->dev);
	return 0;
}
static struct platform_driver exynos_cpufreq_platdrv = {
	.driver = {
		.name	= "exynos5440-cpufreq",
		.of_match_table = exynos_cpufreq_match,
	},
	.probe		= exynos_cpufreq_probe,
	.remove		= exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);
MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");