/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#undef pr_fmt
#define pr_fmt(fmt) "arch_timer: " fmt
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})
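/*
 * Worked example of the consistency check above (illustrative values only):
 * with _old = 1000 and _new = 1010 the difference is 10, (_new - _old) >> 5
 * is 0 and the second read is accepted; with _old = 1000 and _new = 5000 the
 * shifted difference is non-zero, so the pair of reads is retried.
 */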
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_global_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					 const void *arg)
{
	return cpus_have_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}
static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		/* Try the next entry */
		info++;
	}

	return false;
}
static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	static_branch_enable(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_global_cap_id:
		match_fn = arch_timer_check_global_cap_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		pr_err("Unknown erratum workaround type specified.\n");
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	if (static_branch_unlikely(&arch_timer_read_ool_enabled)) {
		const struct arch_timer_erratum_workaround *__wa;

		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

		if (__wa)
			return;
	}

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})
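/*
 * erratum_handler() evaluates to true when the per-CPU workaround provides
 * the named callback, in which case that callback has already run and its
 * return value is stored in 'r'. It is used further down, for example as:
 *
 *	if (erratum_handler(set_next_event_virt, ret, evt, clk))
 *		return ret;
 */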
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					   struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					   struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}
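/*
 * Worked example of the rounding above (illustrative values): with a 24 MHz
 * counter and an ARCH_TIMER_EVT_STREAM_FREQ of 10 kHz, evt_stream_div is
 * 2400. fls(2400) is 12, and since bit 10 of 2400 is clear the value is
 * closer to 2^11 than to 2^12, so pos is decremented to 11 and the divider
 * programmed is min(11, 15) = 11.
 */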
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't need to be
	 * worked around. The vdso may already have been disabled, though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}
static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}
static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
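/*
 * The memory-mapped counter below is 64 bits wide but can only be read as
 * two 32-bit halves, so CNTVCT_HI is sampled before and after CNTVCT_LO:
 * if both samples match, no carry into the high word happened in between
 * and the combined value is consistent; otherwise the sequence is retried.
 */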
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		if (IS_ENABLED(CONFIG_ARM64) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}
#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};
static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)
		return false;

	/*
	 * Only one type of timer is probed,
	 * check if we have another type of timer node in device-tree.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}
static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}
static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);
	arch_timer_check_ool_workaround(ate_match_global_cap_id, NULL);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							  "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	if (!best_frame)
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);

	return best_frame;
}
static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}
static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_of_init);
#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	for (i = 0; i < timer_count; i++) {
		ret = arch_timer_mem_verify_cntfrq(&timers[i]);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}
	}

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (frame)
			break;
	}

	if (frame)
		ret = arch_timer_mem_frame_register(frame);
out:
	kfree(timers);
	return ret;
}
/* Initialize per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_global_cap_id, NULL);
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif