/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
#include <asm/alternative-macros.h>
#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
#define MAX_CPU_FEATURES	128
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which, the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
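/*
 * Illustrative sketch (not part of the kernel API): merging a hypothetical
 * FTR_LOWER_SAFE field across two CPUs. The helper name and its standalone
 * form are assumptions for illustration only.
 */
static inline s64 example_lower_safe_merge(s64 cpu0_val, s64 cpu1_val)
{
	/* For FTR_LOWER_SAFE the smaller of the two reported values is safe */
	return cpu0_val < cpu1_val ? cpu0_val : cpu1_val;
}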
#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};
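/*
 * Illustrative sketch: a hypothetical arm64_ftr_bits description of an
 * unsigned, strictly-matched, 4-bit LOWER_SAFE field at bit 0. The layout
 * and values are assumptions for illustration, not a real register table.
 */
static const struct arm64_ftr_bits example_ftr_bits[] __maybe_unused = {
	{
		.sign		= FTR_UNSIGNED,
		.visible	= FTR_HIDDEN,
		.strict		= FTR_STRICT,
		.type		= FTR_LOWER_SAFE,
		.shift		= 0,
		.width		= 4,
		.safe_val	= 0,
	},
	{ /* sentinel: a zero-width entry terminates a real table */ },
};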
/*
 * Describe the early feature override to the core override code:
 *
 * @val			Values that are to be merged into the final
 *			sanitised value of the register. Only the bitfields
 *			set to 1 in @mask are valid
 * @mask		Mask of the features that are overridden by @val
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
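/*
 * Illustrative sketch of folding an override into a sanitised register
 * value, per the @val/@mask description above. This helper is an
 * assumption for illustration; it is not the kernel's merge routine.
 */
static inline u64 example_apply_override(u64 sanitised,
					 const struct arm64_ftr_override *o)
{
	/* Bits set in @mask take their value from @val */
	return (sanitised & ~o->mask) | (o->val & o->mask);
}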
/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in cpu_hwcaps)
 * and ELF HWCAPs (which are exposed to userspace).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes :
 *
 * 1) Scope of Detection : The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g, checking the
 *    value of a field in a CPU ID feature register or checking the cpu
 *    model. The capability provides a call back ( @matches() ) to
 *    perform the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *        matches. This implies that we have to run the check on all the
 *        booting CPUs, until the system decides that the state of the
 *        capability is finalised. (See section 2 below)
 *
 *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *        match. This implies that we run the check only once, when the
 *        system decides to finalise the state of the capability. If the
 *        capability relies on a field in one of the CPU ID feature
 *        registers, we use the sanitised value of the register from the
 *        CPU feature infrastructure to make the decision.
 *
 *     c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
 *        feature. This category is for features that are "finalised"
 *        (or used) by the kernel very early, even before the SMP CPUs
 *        are brought up.
 *
 *    The process of detection is usually denoted by "update" capability
 *    state in the code.
 *
 * 2) Finalise the state : The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make
 *    better decisions based on the available set of CPUs. However, there
 *    are some special cases where the action is taken during early
 *    boot by the primary boot CPU (e.g, running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the capability
 *    and takes any action, as it may be impossible to execute the actions
 *    safely. A CPU brought up after a capability is "finalised" is
 *    referred to as a "late CPU" w.r.t the capability. e.g, all secondary
 *    CPUs are treated as "late CPUs" for capabilities determined by the
 *    boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities.
 *      a) Boot CPU scope capabilities - Finalised by primary boot CPU via
 *         setup_boot_cpu_capabilities().
 *      b) Everything except (a) - Run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g, by user or by the
 *    kernel), the kernel should make sure that it is safe to use the CPU,
 *    by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via :
 *
 *	secondary_start_kernel()-> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots.
 *
 *	a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
 *	   except for the primary boot CPU.
 *
 *	b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *	   user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action, based on the
 *    severity (e.g, a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability, if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g, SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a later
 *    point. The actions are always initiated only after the capability
 *    is finalised. This is usually denoted by "enabling" the capability.
 *    The actions are initiated as follows :
 *	a) Action is triggered on all online CPUs, after the capability is
 *	   finalised, invoked within the stop_machine() context from
 *	   enable_cpu_capabilities().
 *
 *	b) Any late CPU, brought up after (1), the action is triggered via:
 *
 *	   check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations :
 *
 *		x-----------------------------x
 *		| Type	| System   | Late CPU |
 *		|-----------------------------|
 *		|  a	|   y	   |    n     |
 *		|-----------------------------|
 *		|  b	|   n	   |    y     |
 *		x-----------------------------x
 *
 *     Two separate flag bits are defined to indicate whether each kind of
 *     conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *     Case (a) is not permitted for a capability that the system requires
 *     all CPUs to have in order for the capability to be enabled. This is
 *     typical for capabilities that represent enhanced functionality.
 *
 *     Case (b) is not permitted for a capability that must be enabled
 *     during boot if any CPU in the system requires it in order to run
 *     safely. This is typical for erratum workarounds that cannot be
 *     enabled after the corresponding capability is finalised.
 *
 *     In some non-typical cases, either both (a) and (b), or neither,
 *     should be permitted. This can be described by including neither
 *     or both flags in the capability's type field.
 *
 *     In case of a conflict, the CPU is prevented from booting. If the
 *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
 *     then a kernel panic is triggered.
 */
/*
 * Decide how the capability is detected.
 * On any local CPU vs System wide vs the primary boot CPU
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot. i.e, the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM		ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU		ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU		ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL		ARM64_CPUCAP_SCOPE_MASK
/*
 * Is it permitted for a late CPU to have this capability when the system
 * hasn't already enabled it ?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))
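/*
 * Illustrative sketch of the conflict rules from section 5 of the comment
 * above: given a capability's type flags, decide whether a late CPU whose
 * state differs from the system's may boot. The helper is an assumption
 * for illustration; the real checks live in the cpufeature code.
 */
static inline bool example_late_cpu_allowed(u16 type, bool system_has,
					    bool cpu_has)
{
	if (system_has && !cpu_has)	/* case (a): late CPU misses the cap */
		return type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU;
	if (!system_has && cpu_has)	/* case (b): late CPU has an extra cap */
		return type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU;
	return true;			/* states agree: no conflict */
}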
/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system requires it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU. In
 * case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)

/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g, working around CPU
	 * errata), where all the CPUs must take some action (e.g,
	 * changing system control/configuration). Thus, if an action
	 * is required only if the CPU has the capability, then the
	 * routine must check it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 field_width;
			u8 min_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};

	/*
	 * An optional list of "matches/cpu_enable" pairs for the same
	 * "capability" of the same "type" as described by the parent.
	 * Only matches(), cpu_enable() and fields relevant to these
	 * methods are significant in the list. The cpu_enable() is
	 * invoked only if the corresponding entry "matches()".
	 * However, if a cpu_enable() method is associated
	 * with multiple matches(), care should be taken that either
	 * the match criteria are mutually exclusive, or that the
	 * method is robust against being called multiple times.
	 */
	const struct arm64_cpu_capabilities *match_list;
};
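/*
 * Illustrative sketch: a hypothetical capability table entry. The .desc,
 * the matches() helper and the omitted .capability value are assumptions
 * for illustration; real entries live in arch/arm64/kernel/cpufeature.c
 * and name a cap from asm/cpucaps.h.
 */
static inline bool example_cap_matches(const struct arm64_cpu_capabilities *cap,
				       int scope)
{
	/* A real matches() would inspect an ID register field or the MIDR */
	return scope == SCOPE_SYSTEM;
}

static const struct arm64_cpu_capabilities example_cap __maybe_unused = {
	.desc		= "example system-wide feature",
	.type		= ARM64_CPUCAP_SYSTEM_FEATURE,
	.matches	= example_cap_matches,
};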
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
/*
 * Generic helper for handling capabilities with multiple (match,enable) pairs
 * of call backs, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static __always_inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}
static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

extern DECLARE_BITMAP(boot_capabilities, ARM64_NCAPS);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
static __always_inline bool system_capabilities_finalized(void)
{
	return alternative_has_feature_likely(ARM64_ALWAYS_SYSTEM);
}
/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static __always_inline bool cpus_have_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return arch_test_bit(num, cpu_hwcaps);
}
/*
 * Test for a capability without a runtime check.
 *
 * Before capabilities are finalized, this returns false.
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return alternative_has_feature_unlikely(num);
}
/*
 * Test for a capability without a runtime check.
 *
 * Before capabilities are finalized, this will BUG().
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		BUG();
}
/*
 * Test for a capability, possibly with a runtime check for non-hyp code.
 *
 * For hyp code, this behaves the same as cpus_have_final_cap().
 *
 * For non-hyp code:
 * Before capabilities are finalized, this behaves as cpus_have_cap().
 * After capabilities are finalized, this is patched to avoid a runtime check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_const_cap(int num)
{
	if (is_hyp_code())
		return cpus_have_final_cap(num);
	else if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);
	else
		return cpus_have_cap(num);
}
static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
		pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
	}
}
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
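/*
 * Worked example (illustrative): ID_AA64PFR0_EL1.CSV2 is a 4-bit unsigned
 * field at bits [59:56]. The helper name is an assumption for illustration.
 */
static inline unsigned int example_extract_csv2(u64 pfr0)
{
	/* (pfr0 << (64 - 4 - 56)) >> (64 - 4) == (pfr0 >> 56) & 0xf */
	return cpuid_feature_extract_unsigned_field_width(pfr0, 56, 4);
}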
/*
 * Fields that identify the version of the Performance Monitors Extension do
 * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
 * "Alternative ID scheme used for the Performance Monitors Extension version".
 */
static inline u64 __attribute_const__
cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
{
	u64 val = cpuid_feature_extract_unsigned_field(features, field);
	u64 mask = GENMASK_ULL(field + 3, field);

	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val = 0;

	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}
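/*
 * Illustrative usage sketch: capping ID_AA64DFR0_EL1.PMUVer at 0b0001
 * (baseline PMUv3). The helper name and the literal cap value are
 * assumptions for illustration.
 */
static inline u64 example_cap_pmuver(u64 dfr0)
{
	return cpuid_feature_cap_perfmon_field(dfr0,
					       ID_AA64DFR0_EL1_PMUVer_SHIFT, 1);
}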
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}
static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	if (WARN_ON_ONCE(!width))
		width = 4;
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift,
						      ftrp->width, ftrp->sign);
}
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
}
static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

	return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_sme(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_EL1_CSV2_SHIFT);
	return csv2_val == 3;
}
static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_EL1_BC_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void);

DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}
static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN4_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
}

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN64_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN16_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
}
static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_BIGEND_SHIFT);

	return val == 0x1;
}
static __always_inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_PAN) &&
		cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!system_uses_hw_pan();
}

static __always_inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}

static __always_inline bool system_supports_sme(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME);
}

static __always_inline bool system_supports_sme2(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME2);
}

static __always_inline bool system_supports_fa64(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME_FA64);
}

static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}

static __always_inline bool system_supports_cnp(void)
{
	return IS_ENABLED(CONFIG_ARM64_CNP) &&
		cpus_have_const_cap(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

static __always_inline bool system_uses_irq_prio_masking(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
	return IS_ENABLED(CONFIG_ARM64_MTE) &&
		cpus_have_const_cap(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}

static inline bool system_supports_bti(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
	case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
	case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
	case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
	case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
	case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
	case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by the "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
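/*
 * Illustrative usage sketch: deriving the physical address size from the
 * sanitised ID_AA64MMFR0_EL1.PARange field. The helper name is an
 * assumption for illustration.
 */
static inline u32 example_pa_bits(void)
{
	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	int parange = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	return id_aa64mmfr0_parange_to_phys_shift(parange);
}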
/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	/*
	 * Use cached version to avoid emulated msr operation on KVM
	 * guests.
	 */
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}
static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_PAN_SHIFT);
}
#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif

/* Get a cpu that supports the Activity Monitors Unit (AMU) */
extern int get_cpu_with_amu_feat(void);
static inline unsigned int get_vmid_bits(u64 mmfr1)
{
	int vmid_bits;

	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
	if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
		return 16;

	/*
	 * Return the default here even if any reserved
	 * value is fetched from the system register.
	 */
	return 8;
}
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;

extern struct arm64_ftr_override arm64_sw_feature_override;
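/*
 * Illustrative sketch: reading the "hvhe" software override bit using the
 * ARM64_SW_FEATURE_OVERRIDE_HVHE shift defined at the top of this header.
 * The helper is an assumption for illustration; only bits also set in
 * @mask constitute a valid override.
 */
static inline bool example_hvhe_override_set(void)
{
	u64 val = arm64_sw_feature_override.val &
		  arm64_sw_feature_override.mask;

	return cpuid_feature_extract_unsigned_field(val,
					ARM64_SW_FEATURE_OVERRIDE_HVHE);
}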
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */