]>
Commit | Line | Data |
---|---|---|
059ab7b8 DA |
1 | /* |
2 | * Copyright 2016 Freescale Semiconductor, Inc. | |
3 | * Copyright 2017 NXP | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License | |
7 | * as published by the Free Software Foundation; either version 2 | |
8 | * of the License, or (at your option) any later version. | |
9 | */ | |
10 | ||
11 | #include <linux/clk.h> | |
12 | #include <linux/clockchips.h> | |
13 | #include <linux/clocksource.h> | |
14 | #include <linux/delay.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/of_address.h> | |
17 | #include <linux/of_irq.h> | |
18 | #include <linux/sched_clock.h> | |
19 | ||
/* TPM register offsets and bit fields (offsets from ioremapped base) */
#define TPM_SC			0x10	/* status/control: clock mode + prescaler */
#define TPM_SC_CMOD_INC_PER_CNT	(0x1 << 3)	/* clock the counter from the per clock */
#define TPM_SC_CMOD_DIV_DEFAULT	0x3	/* prescaler field: divide by 8 (see rate >> 3 below) */
#define TPM_CNT			0x14	/* free-running 32-bit counter */
#define TPM_MOD			0x18	/* counter wrap value (modulo) */
#define TPM_STATUS		0x1c	/* event flags; written to acknowledge */
#define TPM_STATUS_CH0F		BIT(0)	/* channel 0 compare event flag */
#define TPM_C0SC		0x20	/* channel 0 status/control */
#define TPM_C0SC_CHIE		BIT(6)	/* channel 0 interrupt enable */
#define TPM_C0SC_MODE_SHIFT	2
#define TPM_C0SC_MODE_MASK	0x3c
#define TPM_C0SC_MODE_SW_COMPARE	0x4	/* software-only output compare mode */
#define TPM_C0V			0x24	/* channel 0 compare value */

static void __iomem *timer_base;	/* ioremapped TPM block, set in tpm_timer_init() */
static struct clock_event_device clockevent_tpm;	/* defined below */
37 | static inline void tpm_timer_disable(void) | |
38 | { | |
39 | unsigned int val; | |
40 | ||
41 | /* channel disable */ | |
42 | val = readl(timer_base + TPM_C0SC); | |
43 | val &= ~(TPM_C0SC_MODE_MASK | TPM_C0SC_CHIE); | |
44 | writel(val, timer_base + TPM_C0SC); | |
45 | } | |
46 | ||
47 | static inline void tpm_timer_enable(void) | |
48 | { | |
49 | unsigned int val; | |
50 | ||
51 | /* channel enabled in sw compare mode */ | |
52 | val = readl(timer_base + TPM_C0SC); | |
53 | val |= (TPM_C0SC_MODE_SW_COMPARE << TPM_C0SC_MODE_SHIFT) | | |
54 | TPM_C0SC_CHIE; | |
55 | writel(val, timer_base + TPM_C0SC); | |
56 | } | |
57 | ||
/* Acknowledge the channel 0 event by writing CH0F back to the status
 * register (presumably write-1-to-clear — confirm against the TPM RM). */
static inline void tpm_irq_acknowledge(void)
{
	writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
}
62 | ||
/* Registered in tpm_clocksource_init() so udelay() reads the TPM counter. */
static struct delay_timer tpm_delay_timer;
/* Return the current value of the free-running 32-bit TPM counter. */
static inline unsigned long tpm_read_counter(void)
{
	return readl(timer_base + TPM_CNT);
}
69 | ||
/* delay_timer callback: read_current_timer hook for the udelay loop. */
static unsigned long tpm_read_current_timer(void)
{
	return tpm_read_counter();
}
74 | ||
/* sched_clock source; notrace because it may be called from ftrace paths. */
static u64 notrace tpm_read_sched_clock(void)
{
	return tpm_read_counter();
}
79 | ||
/*
 * Register the TPM counter as delay timer, sched_clock (32-bit wide) and
 * mmio clocksource at @rate Hz with rating 200.  Returns the
 * clocksource_mmio_init() result; the earlier registrations cannot fail.
 */
static int __init tpm_clocksource_init(unsigned long rate)
{
	tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
	tpm_delay_timer.freq = rate;
	register_current_timer_delay(&tpm_delay_timer);

	sched_clock_register(tpm_read_sched_clock, 32, rate);

	return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
				     rate, 200, 32, clocksource_mmio_readl_up);
}
91 | ||
92 | static int tpm_set_next_event(unsigned long delta, | |
93 | struct clock_event_device *evt) | |
94 | { | |
95 | unsigned long next, now; | |
96 | ||
97 | next = tpm_read_counter(); | |
98 | next += delta; | |
99 | writel(next, timer_base + TPM_C0V); | |
100 | now = tpm_read_counter(); | |
101 | ||
102 | /* | |
103 | * NOTE: We observed in a very small probability, the bus fabric | |
104 | * contention between GPU and A7 may results a few cycles delay | |
105 | * of writing CNT registers which may cause the min_delta event got | |
106 | * missed, so we need add a ETIME check here in case it happened. | |
107 | */ | |
108 | return (int)((next - now) <= 0) ? -ETIME : 0; | |
109 | } | |
110 | ||
/* clockevents callback: entering oneshot mode — arm channel 0 compare. */
static int tpm_set_state_oneshot(struct clock_event_device *evt)
{
	tpm_timer_enable();

	return 0;
}
117 | ||
/* clockevents callback: shutdown — stop channel 0 and mask its irq. */
static int tpm_set_state_shutdown(struct clock_event_device *evt)
{
	tpm_timer_disable();

	return 0;
}
124 | ||
125 | static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id) | |
126 | { | |
127 | struct clock_event_device *evt = dev_id; | |
128 | ||
129 | tpm_irq_acknowledge(); | |
130 | ||
131 | evt->event_handler(evt); | |
132 | ||
133 | return IRQ_HANDLED; | |
134 | } | |
135 | ||
/* One-shot clockevent backed by TPM channel 0 software compare. */
static struct clock_event_device clockevent_tpm = {
	.name			= "i.MX7ULP TPM Timer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_state_oneshot	= tpm_set_state_oneshot,
	.set_next_event		= tpm_set_next_event,
	.set_state_shutdown	= tpm_set_state_shutdown,
	.rating			= 200,	/* same rating as the clocksource */
};
144 | ||
145 | static int __init tpm_clockevent_init(unsigned long rate, int irq) | |
146 | { | |
147 | int ret; | |
148 | ||
149 | ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, | |
150 | "i.MX7ULP TPM Timer", &clockevent_tpm); | |
151 | ||
152 | clockevent_tpm.cpumask = cpumask_of(0); | |
153 | clockevent_tpm.irq = irq; | |
154 | clockevents_config_and_register(&clockevent_tpm, | |
155 | rate, 300, 0xfffffffe); | |
156 | ||
157 | return ret; | |
158 | } | |
159 | ||
160 | static int __init tpm_timer_init(struct device_node *np) | |
161 | { | |
162 | struct clk *ipg, *per; | |
163 | int irq, ret; | |
164 | u32 rate; | |
165 | ||
166 | timer_base = of_iomap(np, 0); | |
167 | if (!timer_base) { | |
168 | pr_err("tpm: failed to get base address\n"); | |
169 | return -ENXIO; | |
170 | } | |
171 | ||
172 | irq = irq_of_parse_and_map(np, 0); | |
173 | if (!irq) { | |
174 | pr_err("tpm: failed to get irq\n"); | |
175 | ret = -ENOENT; | |
176 | goto err_iomap; | |
177 | } | |
178 | ||
179 | ipg = of_clk_get_by_name(np, "ipg"); | |
180 | per = of_clk_get_by_name(np, "per"); | |
181 | if (IS_ERR(ipg) || IS_ERR(per)) { | |
182 | pr_err("tpm: failed to get igp or per clk\n"); | |
183 | ret = -ENODEV; | |
184 | goto err_clk_get; | |
185 | } | |
186 | ||
187 | /* enable clk before accessing registers */ | |
188 | ret = clk_prepare_enable(ipg); | |
189 | if (ret) { | |
190 | pr_err("tpm: ipg clock enable failed (%d)\n", ret); | |
191 | goto err_clk_get; | |
192 | } | |
193 | ||
194 | ret = clk_prepare_enable(per); | |
195 | if (ret) { | |
196 | pr_err("tpm: per clock enable failed (%d)\n", ret); | |
197 | goto err_per_clk_enable; | |
198 | } | |
199 | ||
200 | /* | |
201 | * Initialize tpm module to a known state | |
202 | * 1) Counter disabled | |
203 | * 2) TPM counter operates in up counting mode | |
204 | * 3) Timer Overflow Interrupt disabled | |
205 | * 4) Channel0 disabled | |
206 | * 5) DMA transfers disabled | |
207 | */ | |
208 | writel(0, timer_base + TPM_SC); | |
209 | writel(0, timer_base + TPM_CNT); | |
210 | writel(0, timer_base + TPM_C0SC); | |
211 | ||
212 | /* increase per cnt, div 8 by default */ | |
213 | writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, | |
214 | timer_base + TPM_SC); | |
215 | ||
216 | /* set MOD register to maximum for free running mode */ | |
217 | writel(0xffffffff, timer_base + TPM_MOD); | |
218 | ||
219 | rate = clk_get_rate(per) >> 3; | |
220 | ret = tpm_clocksource_init(rate); | |
221 | if (ret) | |
222 | goto err_per_clk_enable; | |
223 | ||
224 | ret = tpm_clockevent_init(rate, irq); | |
225 | if (ret) | |
226 | goto err_per_clk_enable; | |
227 | ||
228 | return 0; | |
229 | ||
230 | err_per_clk_enable: | |
231 | clk_disable_unprepare(ipg); | |
232 | err_clk_get: | |
233 | clk_put(per); | |
234 | clk_put(ipg); | |
235 | err_iomap: | |
236 | iounmap(timer_base); | |
237 | return ret; | |
238 | } | |
/* Bind tpm_timer_init() to the "fsl,imx7ulp-tpm" DT compatible at early boot. */
TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);