]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/clocksource/arm_arch_timer.c
532e47fa43b346ecc4a84ce2c663d5949c3f061b
[mirror_ubuntu-bionic-kernel.git] / drivers / clocksource / arm_arch_timer.c
1 /*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #define pr_fmt(fmt) "arm_arch_timer: " fmt
13
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/device.h>
17 #include <linux/smp.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/clockchips.h>
21 #include <linux/clocksource.h>
22 #include <linux/interrupt.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_address.h>
25 #include <linux/io.h>
26 #include <linux/slab.h>
27 #include <linux/sched/clock.h>
28 #include <linux/sched_clock.h>
29 #include <linux/acpi.h>
30
31 #include <asm/arch_timer.h>
32 #include <asm/virt.h>
33
34 #include <clocksource/arm_arch_timer.h>
35
36 #define CNTTIDR 0x08
37 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
38
39 #define CNTACR(n) (0x40 + ((n) * 4))
40 #define CNTACR_RPCT BIT(0)
41 #define CNTACR_RVCT BIT(1)
42 #define CNTACR_RFRQ BIT(2)
43 #define CNTACR_RVOFF BIT(3)
44 #define CNTACR_RWVT BIT(4)
45 #define CNTACR_RWPT BIT(5)
46
47 #define CNTVCT_LO 0x08
48 #define CNTVCT_HI 0x0c
49 #define CNTFRQ 0x10
50 #define CNTP_TVAL 0x28
51 #define CNTP_CTL 0x2c
52 #define CNTV_TVAL 0x38
53 #define CNTV_CTL 0x3c
54
55 #define ARCH_CP15_TIMER BIT(0)
56 #define ARCH_MEM_TIMER BIT(1)
57 static unsigned arch_timers_present __initdata;
58
59 static void __iomem *arch_counter_base;
60
61 struct arch_timer {
62 void __iomem *base;
63 struct clock_event_device evt;
64 };
65
66 #define to_arch_timer(e) container_of(e, struct arch_timer, evt)
67
68 static u32 arch_timer_rate;
69
70 enum ppi_nr {
71 PHYS_SECURE_PPI,
72 PHYS_NONSECURE_PPI,
73 VIRT_PPI,
74 HYP_PPI,
75 MAX_TIMER_PPI
76 };
77
78 static int arch_timer_ppi[MAX_TIMER_PPI];
79
80 static struct clock_event_device __percpu *arch_timer_evt;
81
82 static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
83 static bool arch_timer_c3stop;
84 static bool arch_timer_mem_use_virtual;
85 static bool arch_counter_suspend_stop;
86
87 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
88
/*
 * Early command-line override for the event-stream default:
 * "clocksource.arm_arch_timer.evtstrm=<bool>".
 */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
94
95 /*
96 * Architected system timer support.
97 */
98
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * Erratum A008585 workaround: read the register back-to-back until two
 * consecutive reads agree, so a transiently wrong value is never used.
 *
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

/* Stable read of CNTP_TVAL_EL0 under erratum A008585. */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

/* Stable read of CNTV_TVAL_EL0 under erratum A008585. */
static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

/* Stable read of CNTVCT_EL0 under erratum A008585. */
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
133
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verify whether the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

/* Stable read of CNTP_TVAL_EL0 under erratum 161010101. */
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

/* Stable read of CNTV_TVAL_EL0 under erratum 161010101. */
static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

/* Stable read of CNTVCT_EL0 under erratum 161010101. */
static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
#endif
174
175 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
176 const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
177 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
178
179 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
180 EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
181
182 static const struct arch_timer_erratum_workaround ool_workarounds[] = {
183 #ifdef CONFIG_FSL_ERRATUM_A008585
184 {
185 .match_type = ate_match_dt,
186 .id = "fsl,erratum-a008585",
187 .desc = "Freescale erratum a005858",
188 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
189 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
190 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
191 },
192 #endif
193 #ifdef CONFIG_HISILICON_ERRATUM_161010101
194 {
195 .match_type = ate_match_dt,
196 .id = "hisilicon,erratum-161010101",
197 .desc = "HiSilicon erratum 161010101",
198 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
199 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
200 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
201 },
202 #endif
203 };
204
205 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
206 const void *);
207
208 static
209 bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
210 const void *arg)
211 {
212 const struct device_node *np = arg;
213
214 return of_property_read_bool(np, wa->id);
215 }
216
217 static
218 bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
219 const void *arg)
220 {
221 return this_cpu_has_cap((uintptr_t)wa->id);
222 }
223
224 static const struct arch_timer_erratum_workaround *
225 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
226 ate_match_fn_t match_fn,
227 void *arg)
228 {
229 int i;
230
231 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
232 if (ool_workarounds[i].match_type != type)
233 continue;
234
235 if (match_fn(&ool_workarounds[i], arg))
236 return &ool_workarounds[i];
237 }
238
239 return NULL;
240 }
241
/*
 * Publish @wa as the active workaround and flip the static key so the
 * out-of-line counter accessors are taken from now on.
 */
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa)
{
	timer_unstable_counter_workaround = wa;
	static_branch_enable(&arch_timer_read_ool_enabled);
}
248
249 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
250 void *arg)
251 {
252 const struct arch_timer_erratum_workaround *wa;
253 ate_match_fn_t match_fn = NULL;
254 bool local = false;
255
256 switch (type) {
257 case ate_match_dt:
258 match_fn = arch_timer_check_dt_erratum;
259 break;
260 case ate_match_local_cap_id:
261 match_fn = arch_timer_check_local_cap_erratum;
262 local = true;
263 break;
264 default:
265 WARN_ON(1);
266 return;
267 }
268
269 wa = arch_timer_iterate_errata(type, match_fn, arg);
270 if (!wa)
271 return;
272
273 if (needs_unstable_timer_counter_workaround()) {
274 if (wa != timer_unstable_counter_workaround)
275 pr_warn("Can't enable workaround for %s (clashes with %s\n)",
276 wa->desc,
277 timer_unstable_counter_workaround->desc);
278 return;
279 }
280
281 arch_timer_enable_workaround(wa);
282 pr_info("Enabling %s workaround for %s\n",
283 local ? "local" : "global", wa->desc);
284 }
285
286 #else
287 #define arch_timer_check_ool_workaround(t,a) do { } while(0)
288 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
289
/*
 * Route a timer register write to the right backend: MMIO frame
 * (physical or virtual) or the CP15/sysreg interface.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		/* CP15/system-register access for the per-CPU timer */
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

/*
 * Counterpart of arch_timer_reg_write(): read CTRL/TVAL from the MMIO
 * frame or via the CP15/sysreg accessors.
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

/*
 * Common interrupt path: if the timer fired (ISTATUS set in CTRL), mask
 * its interrupt so it stops asserting, then invoke the clockevent handler.
 */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		/* mask before the handler reprograms the next event */
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
367
368 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
369 {
370 struct clock_event_device *evt = dev_id;
371
372 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
373 }
374
375 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
376 {
377 struct clock_event_device *evt = dev_id;
378
379 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
380 }
381
382 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
383 {
384 struct clock_event_device *evt = dev_id;
385
386 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
387 }
388
389 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
390 {
391 struct clock_event_device *evt = dev_id;
392
393 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
394 }
395
/*
 * Clear the enable bit so the timer stops firing. Used for both the
 * shutdown and oneshot-stopped clockevent states.
 */
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

/* Per-access-mode trampolines for the clockevent state callbacks. */
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
427
/*
 * Program a one-shot event @evt ticks ahead: write the downcounting TVAL
 * first, then enable the timer and unmask its interrupt in one CTRL write.
 */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/*
 * Erratum-safe variant: program an absolute CVAL (current counter + delta)
 * via sysreg instead of TVAL, sidestepping the unstable TVAL access.
 */
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_virt(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

/* Per-access-mode set_next_event callbacks for the clockevent devices. */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

/*
 * If an unstable-counter workaround is active, switch the per-CPU
 * clockevent to the erratum-safe CVAL-based set_next_event.
 */
static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = erratum_set_next_event_virt;
	else
		clk->set_next_event = erratum_set_next_event_phys;
#endif
}
513
/*
 * Common clockevent setup for both the per-CPU (cp15) timer and the MMIO
 * timer frame: picks the callbacks matching the access mode in use,
 * applies CPU-local erratum workarounds, then registers the device.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* CPU-local errata can only be detected on the CPU itself */
		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		erratum_workaround_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* start from a known (disabled) state before registering */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
568
/*
 * Enable the virtual event stream with the given divider and advertise
 * the capability via the ELF hwcaps.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

/*
 * Pick the divider bit position that brings the event stream closest to
 * ARCH_TIMER_EVT_STREAM_FREQ, then enable the stream.
 */
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	/* round down when the next-lower power of two is closer */
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream; clamp to the 4-bit trigger field */
	arch_timer_evtstrm_enable(min(pos, 15));
}

/*
 * Configure EL0 access: userspace may read the virtual counter only;
 * the timers, physical counter and event stream stay kernel-only.
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
613
614 static bool arch_timer_has_nonsecure_ppi(void)
615 {
616 return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
617 arch_timer_ppi[PHYS_NONSECURE_PPI]);
618 }
619
620 static u32 check_ppi_trigger(int irq)
621 {
622 u32 flags = irq_get_trigger_type(irq);
623
624 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
625 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
626 pr_warn("WARNING: Please fix your firmware\n");
627 flags = IRQF_TRIGGER_LOW;
628 }
629
630 return flags;
631 }
632
/*
 * CPU hotplug "starting" callback: set up this CPU's clockevent, enable
 * its PPI(s) with validated trigger flags, then configure EL0 access and
 * the optional event stream.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
654
/*
 * Determine the counter frequency once, preferring (in order): an
 * already-known rate, the DT "clock-frequency" property (DT boots only),
 * a frame's CNTFRQ register when @cntbase is given, and finally the
 * CP15 CNTFRQ.
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

/* One-shot boot banner naming the timers in use and their rate in MHz. */
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
		(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
695
/* Exported accessor for the detected counter frequency (Hz). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

/*
 * Read the 64-bit virtual counter from the MMIO frame. The hi/lo/hi
 * sequence guards against the low word wrapping between the two reads.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

/* clocksource read callback. */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read callback (feeds the KVM timecounter). */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
731
/* System counter as a clocksource; 56 bits is the architectural minimum. */
static struct clocksource clocksource_counter = {
	.name = "arch_sys_counter",
	.rating = 400,
	.read = arch_counter_read,
	.mask = CLOCKSOURCE_MASK(56),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Same counter wrapped as a cyclecounter for the KVM timecounter. */
static struct cyclecounter cyclecounter __ro_after_init = {
	.read = arch_counter_read_cc,
	.mask = CLOCKSOURCE_MASK(56),
};

static struct arch_timer_kvm_info arch_timer_kvm_info;

/* Hand KVM the timecounter and virtual timer IRQ collected at init. */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
751
/*
 * Register the counter as clocksource, sched_clock source and KVM
 * timecounter. Prefers the cp15 counter when present; otherwise falls
 * back to the MMIO frame reader.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
789
/* Disable this CPU's timer PPI(s) and put the clockevent into shutdown. */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

/* CPU hotplug teardown callback. */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
/*
 * CNTKCTL may be lost across a low-power state: save it on CPU_PM_ENTER
 * and restore it on exit or failed entry.
 * NOTE(review): saved_cntkctl is one global shared by all CPUs -- this
 * assumes every CPU programs the same CNTKCTL value; confirm.
 */
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
/* Stubs when CONFIG_CPU_PM is disabled. */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
846
/*
 * Allocate the per-CPU clockevents, request the PPI(s) matching the
 * selected timer, and install the CPU hotplug callbacks that bring each
 * CPU's timer up and down. Unwinds fully on any failure.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		/* both secure and non-secure PPIs must be requested */
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;


	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
918
/*
 * Allocate and register the MMIO timer frame as a clockevent, wiring its
 * interrupt to the virtual or physical memory handler as configured.
 * On request_irq() failure the allocation is freed and the error returned.
 */
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
946
947 static const struct of_device_id arch_timer_of_match[] __initconst = {
948 { .compatible = "arm,armv7-timer", },
949 { .compatible = "arm,armv8-timer", },
950 {},
951 };
952
953 static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
954 { .compatible = "arm,armv7-timer-mem", },
955 {},
956 };
957
958 static bool __init
959 arch_timer_needs_probing(int type, const struct of_device_id *matches)
960 {
961 struct device_node *dn;
962 bool needs_probing = false;
963
964 dn = of_find_matching_node(NULL, matches);
965 if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
966 needs_probing = true;
967 of_node_put(dn);
968
969 return needs_probing;
970 }
971
/*
 * Finish bringup once every expected timer (cp15 and/or MMIO) has probed:
 * print the banner, register the counter, and run the arch hook.
 * Returns 0 (deferring) while another matching node is still pending.
 */
static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

/*
 * Pick the PPI the per-CPU timer will use, register the clockevents and
 * record the virtual timer IRQ for KVM.
 */
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}
1035
/*
 * DT probe for the per-CPU (cp15) timer node: map the PPIs, detect the
 * rate, apply DT-matched errata and board quirks, then run common init.
 */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
1070 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1071 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
1072
1073 static int __init arch_timer_mem_init(struct device_node *np)
1074 {
1075 struct device_node *frame, *best_frame = NULL;
1076 void __iomem *cntctlbase, *base;
1077 unsigned int irq, ret = -EINVAL;
1078 u32 cnttidr;
1079
1080 arch_timers_present |= ARCH_MEM_TIMER;
1081 cntctlbase = of_iomap(np, 0);
1082 if (!cntctlbase) {
1083 pr_err("arch_timer: Can't find CNTCTLBase\n");
1084 return -ENXIO;
1085 }
1086
1087 cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
1088
1089 /*
1090 * Try to find a virtual capable frame. Otherwise fall back to a
1091 * physical capable frame.
1092 */
1093 for_each_available_child_of_node(np, frame) {
1094 int n;
1095 u32 cntacr;
1096
1097 if (of_property_read_u32(frame, "frame-number", &n)) {
1098 pr_err("arch_timer: Missing frame-number\n");
1099 of_node_put(frame);
1100 goto out;
1101 }
1102
1103 /* Try enabling everything, and see what sticks */
1104 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1105 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1106 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
1107 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
1108
1109 if ((cnttidr & CNTTIDR_VIRT(n)) &&
1110 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
1111 of_node_put(best_frame);
1112 best_frame = frame;
1113 arch_timer_mem_use_virtual = true;
1114 break;
1115 }
1116
1117 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1118 continue;
1119
1120 of_node_put(best_frame);
1121 best_frame = of_node_get(frame);
1122 }
1123
1124 ret= -ENXIO;
1125 base = arch_counter_base = of_io_request_and_map(best_frame, 0,
1126 "arch_mem_timer");
1127 if (IS_ERR(base)) {
1128 pr_err("arch_timer: Can't map frame's registers\n");
1129 goto out;
1130 }
1131
1132 if (arch_timer_mem_use_virtual)
1133 irq = irq_of_parse_and_map(best_frame, 1);
1134 else
1135 irq = irq_of_parse_and_map(best_frame, 0);
1136
1137 ret = -EINVAL;
1138 if (!irq) {
1139 pr_err("arch_timer: Frame missing %s irq",
1140 arch_timer_mem_use_virtual ? "virt" : "phys");
1141 goto out;
1142 }
1143
1144 arch_timer_detect_rate(base, np);
1145 ret = arch_timer_mem_register(base, irq);
1146 if (ret)
1147 goto out;
1148
1149 return arch_timer_common_init();
1150 out:
1151 iounmap(cntctlbase);
1152 of_node_put(best_frame);
1153 return ret;
1154 }
1155 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
1156 arch_timer_mem_init);
1157
1158 #ifdef CONFIG_ACPI
1159 static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
1160 {
1161 int trigger, polarity;
1162
1163 if (!interrupt)
1164 return 0;
1165
1166 trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
1167 : ACPI_LEVEL_SENSITIVE;
1168
1169 polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
1170 : ACPI_ACTIVE_HIGH;
1171
1172 return acpi_register_gsi(NULL, interrupt, trigger, polarity);
1173 }
1174
1175 /* Initialize per-processor generic timer */
1176 static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1177 {
1178 struct acpi_table_gtdt *gtdt;
1179
1180 if (arch_timers_present & ARCH_CP15_TIMER) {
1181 pr_warn("arch_timer: already initialized, skipping\n");
1182 return -EINVAL;
1183 }
1184
1185 gtdt = container_of(table, struct acpi_table_gtdt, header);
1186
1187 arch_timers_present |= ARCH_CP15_TIMER;
1188
1189 arch_timer_ppi[PHYS_SECURE_PPI] =
1190 map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
1191 gtdt->secure_el1_flags);
1192
1193 arch_timer_ppi[PHYS_NONSECURE_PPI] =
1194 map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
1195 gtdt->non_secure_el1_flags);
1196
1197 arch_timer_ppi[VIRT_PPI] =
1198 map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
1199 gtdt->virtual_timer_flags);
1200
1201 arch_timer_ppi[HYP_PPI] =
1202 map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
1203 gtdt->non_secure_el2_flags);
1204
1205 /* Get the frequency from CNTFRQ */
1206 arch_timer_detect_rate(NULL, NULL);
1207
1208 /* Always-on capability */
1209 arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
1210
1211 arch_timer_init();
1212 return 0;
1213 }
1214 CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
1215 #endif