/*
 * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be
 * programmed to go from @count to @limit and optionally interrupt.
 * We've designated TIMER0 for clockevents and TIMER1 for clocksource
 *
 * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
 * which are suitable for UP and SMP based clocksources respectively
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>
/* Timer input clock rate in Hz, probed from the DT clock by arc_get_timer_clk() */
static unsigned long arc_timer_freq;
34 static int noinline
arc_get_timer_clk(struct device_node
*node
)
39 clk
= of_clk_get(node
, 0);
41 pr_err("timer missing clk\n");
45 ret
= clk_prepare_enable(clk
);
47 pr_err("Couldn't enable parent clk\n");
51 arc_timer_freq
= clk_get_rate(clk
);
56 /********** Clock Source Device *********/
58 #ifdef CONFIG_ARC_TIMERS_64BIT
60 static u64
arc_read_gfrc(struct clocksource
*cs
)
65 local_irq_save(flags
);
67 __mcip_cmd(CMD_GFRC_READ_LO
, 0);
68 l
= read_aux_reg(ARC_REG_MCIP_READBACK
);
70 __mcip_cmd(CMD_GFRC_READ_HI
, 0);
71 h
= read_aux_reg(ARC_REG_MCIP_READBACK
);
73 local_irq_restore(flags
);
75 return (((u64
)h
) << 32) | l
;
78 static notrace u64
arc_gfrc_clock_read(void)
80 return arc_read_gfrc(NULL
);
83 static struct clocksource arc_counter_gfrc
= {
84 .name
= "ARConnect GFRC",
86 .read
= arc_read_gfrc
,
87 .mask
= CLOCKSOURCE_MASK(64),
88 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
91 static int __init
arc_cs_setup_gfrc(struct device_node
*node
)
96 READ_BCR(ARC_REG_MCIP_BCR
, mp
);
98 pr_warn("Global-64-bit-Ctr clocksource not detected\n");
102 ret
= arc_get_timer_clk(node
);
106 sched_clock_register(arc_gfrc_clock_read
, 64, arc_timer_freq
);
108 return clocksource_register_hz(&arc_counter_gfrc
, arc_timer_freq
);
110 TIMER_OF_DECLARE(arc_gfrc
, "snps,archs-timer-gfrc", arc_cs_setup_gfrc
);
/* ARCv2 in-core RTC: 64-bit free running counter, aux register interface */
#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105
116 static u64
arc_read_rtc(struct clocksource
*cs
)
118 unsigned long status
;
122 * hardware has an internal state machine which tracks readout of
123 * low/high and updates the CTRL.status if
124 * - interrupt/exception taken between the two reads
125 * - high increments after low has been read
128 l
= read_aux_reg(AUX_RTC_LOW
);
129 h
= read_aux_reg(AUX_RTC_HIGH
);
130 status
= read_aux_reg(AUX_RTC_CTRL
);
131 } while (!(status
& _BITUL(31)));
133 return (((u64
)h
) << 32) | l
;
136 static notrace u64
arc_rtc_clock_read(void)
138 return arc_read_rtc(NULL
);
141 static struct clocksource arc_counter_rtc
= {
144 .read
= arc_read_rtc
,
145 .mask
= CLOCKSOURCE_MASK(64),
146 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
149 static int __init
arc_cs_setup_rtc(struct device_node
*node
)
151 struct bcr_timer timer
;
154 READ_BCR(ARC_REG_TIMERS_BCR
, timer
);
156 pr_warn("Local-64-bit-Ctr clocksource not detected\n");
160 /* Local to CPU hence not usable in SMP */
161 if (IS_ENABLED(CONFIG_SMP
)) {
162 pr_warn("Local-64-bit-Ctr not usable in SMP\n");
166 ret
= arc_get_timer_clk(node
);
170 write_aux_reg(AUX_RTC_CTRL
, 1);
172 sched_clock_register(arc_rtc_clock_read
, 64, arc_timer_freq
);
174 return clocksource_register_hz(&arc_counter_rtc
, arc_timer_freq
);
176 TIMER_OF_DECLARE(arc_rtc
, "snps,archs-timer-rtc", arc_cs_setup_rtc
);
181 * 32bit TIMER1 to keep counting monotonically and wraparound
184 static u64
arc_read_timer1(struct clocksource
*cs
)
186 return (u64
) read_aux_reg(ARC_REG_TIMER1_CNT
);
189 static notrace u64
arc_timer1_clock_read(void)
191 return arc_read_timer1(NULL
);
194 static struct clocksource arc_counter_timer1
= {
195 .name
= "ARC Timer1",
197 .read
= arc_read_timer1
,
198 .mask
= CLOCKSOURCE_MASK(32),
199 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
202 static int __init
arc_cs_setup_timer1(struct device_node
*node
)
206 /* Local to CPU hence not usable in SMP */
207 if (IS_ENABLED(CONFIG_SMP
))
210 ret
= arc_get_timer_clk(node
);
214 write_aux_reg(ARC_REG_TIMER1_LIMIT
, ARC_TIMERN_MAX
);
215 write_aux_reg(ARC_REG_TIMER1_CNT
, 0);
216 write_aux_reg(ARC_REG_TIMER1_CTRL
, TIMER_CTRL_NH
);
218 sched_clock_register(arc_timer1_clock_read
, 32, arc_timer_freq
);
220 return clocksource_register_hz(&arc_counter_timer1
, arc_timer_freq
);
/********** Clock Event Device *********/

/* TIMER0 irq, requested as a percpu irq and (en|dis)abled per-cpu below */
static int arc_timer_irq;
228 * Arm the timer to interrupt after @cycles
229 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
231 static void arc_timer_event_setup(unsigned int cycles
)
233 write_aux_reg(ARC_REG_TIMER0_LIMIT
, cycles
);
234 write_aux_reg(ARC_REG_TIMER0_CNT
, 0); /* start from 0 */
236 write_aux_reg(ARC_REG_TIMER0_CTRL
, TIMER_CTRL_IE
| TIMER_CTRL_NH
);
/* clockevent oneshot hook: arm TIMER0 to fire after @delta cycles */
static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
247 static int arc_clkevent_set_periodic(struct clock_event_device
*dev
)
250 * At X Hz, 1 sec = 1000ms -> X cycles;
251 * 10ms -> X / 100 cycles
253 arc_timer_event_setup(arc_timer_freq
/ HZ
);
257 static DEFINE_PER_CPU(struct clock_event_device
, arc_clockevent_device
) = {
258 .name
= "ARC Timer0",
259 .features
= CLOCK_EVT_FEAT_ONESHOT
|
260 CLOCK_EVT_FEAT_PERIODIC
,
262 .set_next_event
= arc_clkevent_set_next_event
,
263 .set_state_periodic
= arc_clkevent_set_periodic
,
266 static irqreturn_t
timer_irq_handler(int irq
, void *dev_id
)
269 * Note that generic IRQ core could have passed @evt for @dev_id if
270 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
272 struct clock_event_device
*evt
= this_cpu_ptr(&arc_clockevent_device
);
273 int irq_reenable
= clockevent_state_periodic(evt
);
276 * Any write to CTRL reg ACks the interrupt, we rewrite the
277 * Count when [N]ot [H]alted bit.
278 * And re-arm it if perioid by [I]nterrupt [E]nable bit
280 write_aux_reg(ARC_REG_TIMER0_CTRL
, irq_reenable
| TIMER_CTRL_NH
);
282 evt
->event_handler(evt
);
288 static int arc_timer_starting_cpu(unsigned int cpu
)
290 struct clock_event_device
*evt
= this_cpu_ptr(&arc_clockevent_device
);
292 evt
->cpumask
= cpumask_of(smp_processor_id());
294 clockevents_config_and_register(evt
, arc_timer_freq
, 0, ARC_TIMERN_MAX
);
295 enable_percpu_irq(arc_timer_irq
, 0);
299 static int arc_timer_dying_cpu(unsigned int cpu
)
301 disable_percpu_irq(arc_timer_irq
);
306 * clockevent setup for boot CPU
308 static int __init
arc_clockevent_setup(struct device_node
*node
)
310 struct clock_event_device
*evt
= this_cpu_ptr(&arc_clockevent_device
);
313 arc_timer_irq
= irq_of_parse_and_map(node
, 0);
314 if (arc_timer_irq
<= 0) {
315 pr_err("clockevent: missing irq\n");
319 ret
= arc_get_timer_clk(node
);
321 pr_err("clockevent: missing clk\n");
325 /* Needs apriori irq_set_percpu_devid() done in intc map function */
326 ret
= request_percpu_irq(arc_timer_irq
, timer_irq_handler
,
327 "Timer0 (per-cpu-tick)", evt
);
329 pr_err("clockevent: unable to request irq\n");
333 ret
= cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING
,
334 "clockevents/arc/timer:starting",
335 arc_timer_starting_cpu
,
336 arc_timer_dying_cpu
);
338 pr_err("Failed to setup hotplug state\n");
344 static int __init
arc_of_timer_init(struct device_node
*np
)
346 static int init_count
= 0;
351 ret
= arc_clockevent_setup(np
);
353 ret
= arc_cs_setup_timer1(np
);
358 TIMER_OF_DECLARE(arc_clkevt
, "snps,arc-timer", arc_of_timer_init
);