drivers/clk/mvebu/clk-cpu.c
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>

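/*
 * Register layout, as used below: the divider value register holds one
 * 8-bit field per CPU (only the low six bits are significant, per
 * SYS_CTRL_CLK_DIVIDER_MASK), while the divider control register carries
 * per-CPU "reload smooth" bits and a global reload trigger bit used by
 * clk_cpu_set_rate().
 */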
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET	0x0
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET	0xC
#define SYS_CTRL_CLK_DIVIDER_MASK		0x3F

#define MAX_CPU 4
struct cpu_clk {
	struct clk_hw hw;
	int cpu;
	const char *clk_name;
	const char *parent_name;
	void __iomem *reg_base;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

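/*
 * The CPU rate is simply the parent clock rate divided by the value
 * currently programmed in this CPU's divider field.
 */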
static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
	return parent_rate / div;
}

static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* Valid ratios are 1:1, 1:2 and 1:3 */
	u32 div;

	div = *parent_rate / rate;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}

static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}

static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};

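/*
 * Map the clock-complex registers, register one clock per "cpu" node
 * found in the device tree (named "cpu0", "cpu1", ...), and expose them
 * all through a single onecell clock provider.
 */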
void __init of_cpu_clk_setup(struct device_node *node)
{
	struct cpu_clk *cpuclk;
	void __iomem *clock_complex_base = of_iomap(node, 0);
	int ncpus = 0;
	struct device_node *dn;

	if (clock_complex_base == NULL) {
		pr_err("%s: clock-complex base register not set\n",
			__func__);
		return;
	}

	for_each_node_by_type(dn, "cpu")
		ncpus++;

	cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
	if (WARN_ON(!cpuclk))
		goto cpuclk_out;

	clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
	if (WARN_ON(!clks))
		goto clks_out;

	for_each_node_by_type(dn, "cpu") {
		struct clk_init_data init;
		struct clk *clk;
		struct clk *parent_clk;
		char *clk_name = kzalloc(5, GFP_KERNEL);
		int cpu, err;

		if (WARN_ON(!clk_name))
			goto bail_out;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err))
			goto bail_out;

		sprintf(clk_name, "cpu%d", cpu);
		parent_clk = of_clk_get(node, 0);

		cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
		cpuclk[cpu].clk_name = clk_name;
		cpuclk[cpu].cpu = cpu;
		cpuclk[cpu].reg_base = clock_complex_base;
		cpuclk[cpu].hw.init = &init;

		init.name = cpuclk[cpu].clk_name;
		init.ops = &cpu_ops;
		init.flags = 0;
		init.parent_names = &cpuclk[cpu].parent_name;
		init.num_parents = 1;

		clk = clk_register(NULL, &cpuclk[cpu].hw);
		if (WARN_ON(IS_ERR(clk)))
			goto bail_out;
		clks[cpu] = clk;
	}
	clk_data.clk_num = MAX_CPU;
	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

	return;
bail_out:
	kfree(clks);
	while (ncpus--)
		kfree(cpuclk[ncpus].clk_name);
clks_out:
	kfree(cpuclk);
cpuclk_out:
	iounmap(clock_complex_base);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);
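
/*
 * Minimal usage sketch (not part of the original driver): how a consumer
 * such as a cpufreq driver might scale one of the per-CPU clocks
 * registered above. "example_scale_cpu" and "cpu_dev" are hypothetical
 * names used purely for illustration.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_scale_cpu(struct device *cpu_dev, unsigned long target_hz)
{
	struct clk *clk = clk_get(cpu_dev, NULL);
	long rounded;
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_cpu_round_rate() clamps the request to the 1:1, 1:2 or 1:3 ratios */
	rounded = clk_round_rate(clk, target_hz);
	if (rounded < 0) {
		clk_put(clk);
		return rounded;
	}

	ret = clk_set_rate(clk, rounded);
	clk_put(clk);
	return ret;
}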