Merge branch 'for-next/bti-user' into for-next/bti
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e6d31776e49bddcaa49663ba9e1f50ffb675f8d8..b234d6f71cba6402a57bc833473b77bdfde2edba 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -116,6 +116,8 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 
 static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
 
+static bool __system_matches_cap(unsigned int n);
+
 /*
  * NOTE: Any changes to the visibility of features should be kept in
  * sync with the documentation of the CPU feature register ABI.
@@ -163,6 +165,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
                                   FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
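
The new entry above describes the AMU field of ID_AA64PFR0_EL1 as a hidden, non-strict, lower-safe 4-bit field. As a minimal plain-C sketch (outside the kernel), this is the kind of field extraction arm64_ftr_value() performs for such an entry; the shift value here is an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for ID_AA64PFR0_AMU_SHIFT. */
#define AMU_SHIFT 44

/* Extract an unsigned 4-bit ID register field, as described by an
 * ARM64_FTR_BITS(..., shift, 4, 0) entry like the one added above. */
static uint64_t id_field(uint64_t reg, unsigned int shift)
{
        return (reg >> shift) & 0xf;
}

int main(void)
{
        uint64_t pfr0 = 1ULL << AMU_SHIFT;      /* AMU field = 1 (AMUv1) */

        printf("AMU field: %llu\n",
               (unsigned long long)id_field(pfr0, AMU_SHIFT));
        return 0;
}
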
@@ -553,7 +556,7 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 
        BUG_ON(!reg);
 
-       for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
+       for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
                u64 ftr_mask = arm64_ftr_mask(ftrp);
                s64 ftr_new = arm64_ftr_value(ftrp, new);
 
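
For context, the loop touched by this whitespace fix walks the reg->ftr_bits table until it reaches the terminating entry whose width is zero (ARM64_FTR_END). A minimal sketch of that sentinel-terminated walk, with the struct reduced to the fields the loop needs:

#include <stdio.h>

/* Reduced stand-in for struct arm64_ftr_bits. */
struct ftr_bits {
        unsigned int shift;
        unsigned int width;     /* width == 0 terminates the table */
};

static const struct ftr_bits example_bits[] = {
        { .shift = 44, .width = 4 },
        { .shift = 40, .width = 4 },
        { 0 },  /* sentinel, like ARM64_FTR_END */
};

int main(void)
{
        const struct ftr_bits *ftrp;

        /* Same shape as: for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) */
        for (ftrp = example_bits; ftrp->width; ftrp++)
                printf("field: shift %u, width %u\n", ftrp->shift, ftrp->width);
        return 0;
}
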
@@ -1224,6 +1227,57 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
 
 #endif
 
+#ifdef CONFIG_ARM64_AMU_EXTN
+
+/*
+ * The "amu_cpus" cpumask only signals that the CPU implementation for the
+ * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
+ * information regarding all the events that it supports. When a CPU bit is
+ * set in the cpumask, the user of this feature can only rely on the presence
+ * of the 4 fixed counters for that CPU. But this does not guarantee that the
+ * counters are enabled or access to these counters is enabled by code
+ * executed at higher exception levels (firmware).
+ */
+static struct cpumask amu_cpus __read_mostly;
+
+bool cpu_has_amu_feat(int cpu)
+{
+       return cpumask_test_cpu(cpu, &amu_cpus);
+}
+
+/* Initialize the use of AMU counters for frequency invariance */
+extern void init_cpu_freq_invariance_counters(void);
+
+static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
+{
+       if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
+               pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
+                       smp_processor_id());
+               cpumask_set_cpu(smp_processor_id(), &amu_cpus);
+               init_cpu_freq_invariance_counters();
+       }
+}
+
+static bool has_amu(const struct arm64_cpu_capabilities *cap,
+                   int __unused)
+{
+       /*
+        * The AMU extension is a non-conflicting feature: the kernel can
+        * safely run a mix of CPUs with and without support for the
+        * activity monitors extension. Therefore, unconditionally enable
+        * the capability to allow any late CPU to use the feature.
+        *
+        * With this feature unconditionally enabled, the cpu_enable
+        * function will be called for all CPUs that match the criteria,
+        * including secondary and hotplugged CPUs, marking this feature
+        * as present on each of them. The enable function will also
+        * print a detection message.
+        */
+
+       return true;
+}
+#endif
+
 #ifdef CONFIG_ARM64_VHE
 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
 {
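
The cpumask added in this hunk is written once per matching CPU from cpu_amu_enable() and read through cpu_has_amu_feat(). A minimal userspace sketch of that set/test pattern, using a plain 64-bit mask in place of struct cpumask:

#include <stdbool.h>
#include <stdio.h>

static unsigned long long amu_mask;     /* stand-in for amu_cpus */

static void mark_cpu_amu(int cpu)       /* cf. cpumask_set_cpu() */
{
        amu_mask |= 1ULL << cpu;
}

static bool cpu_has_amu(int cpu)        /* cf. cpu_has_amu_feat() */
{
        return amu_mask & (1ULL << cpu);
}

int main(void)
{
        mark_cpu_amu(2);        /* CPU2 detected AMU support */
        printf("CPU0: %d, CPU2: %d\n", cpu_has_amu(0), cpu_has_amu(2));
        return 0;
}
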
@@ -1318,10 +1372,18 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
-static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
+                            int __unused)
 {
-       sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
-                                      SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+       return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
+              __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+}
+
+static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
+                            int __unused)
+{
+       return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
+              __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */
 
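
Both new helpers follow the same shape: a meta-capability is satisfied when either of two mutually exclusive variants (architected or IMP DEF algorithm) has matched. A hedged sketch of that OR-composition, with the lookup reduced to a flat array (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

enum { CAP_AUTH_ARCH, CAP_AUTH_IMP_DEF, NCAPS };

static bool cap_detected[NCAPS];        /* filled in by detection code */

static bool matches_cap(unsigned int n) /* cf. __system_matches_cap() */
{
        return n < NCAPS && cap_detected[n];
}

/* Composite capability, mirroring has_address_auth() above: either
 * variant being present satisfies it. */
static bool has_auth(void)
{
        return matches_cap(CAP_AUTH_ARCH) || matches_cap(CAP_AUTH_IMP_DEF);
}

int main(void)
{
        cap_detected[CAP_AUTH_IMP_DEF] = true;
        printf("address auth: %d\n", has_auth());
        return 0;
}
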
@@ -1364,6 +1426,25 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
 }
 #endif /* CONFIG_ARM64_BTI */
 
+/* Internal helper functions to match cpu capability type */
+static bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+       return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+       return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
+static bool
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
+{
+       return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -1516,6 +1597,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_clear_disr,
        },
 #endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_AMU_EXTN
+       {
+               /*
+                * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
+                * Therefore, don't provide .desc as we don't want the detection
+                * message to be shown until at least one CPU is detected to
+                * support the feature.
+                */
+               .capability = ARM64_HAS_AMU_EXTN,
+               .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+               .matches = has_amu,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .sign = FTR_UNSIGNED,
+               .field_pos = ID_AA64PFR0_AMU_SHIFT,
+               .min_field_value = ID_AA64PFR0_AMU,
+               .cpu_enable = cpu_amu_enable,
+       },
+#endif /* CONFIG_ARM64_AMU_EXTN */
        {
                .desc = "Data cache clean to the PoU not required for I/D coherence",
                .capability = ARM64_HAS_CACHE_IDC,
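
The AMU entry above leaves detection to the generic has_cpuid_feature() path: read the register named by .sys_reg, extract the field at .field_pos, and require at least .min_field_value. A simplified sketch of that unsigned comparison, with the struct cut down to the two fields involved:

#include <stdbool.h>
#include <stdint.h>

struct capability {
        unsigned int field_pos;         /* cf. .field_pos */
        unsigned int min_field_value;   /* cf. .min_field_value */
};

/* Unsigned-field version of the has_cpuid_feature() check: the feature
 * is present when the 4-bit field meets the minimum value. */
static bool cpuid_feature_check(const struct capability *cap, uint64_t reg)
{
        uint64_t field = (reg >> cap->field_pos) & 0xf;

        return field >= cap->min_field_value;
}

int main(void)
{
        struct capability amu = { .field_pos = 44, .min_field_value = 1 };
        uint64_t pfr0 = 1ULL << 44;     /* AMU field = 1 */

        return !cpuid_feature_check(&amu, pfr0);        /* exit 0 if present */
}
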
@@ -1609,24 +1708,27 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "Address authentication (architected algorithm)",
                .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
                .field_pos = ID_AA64ISAR1_APA_SHIFT,
                .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
                .matches = has_cpuid_feature,
-               .cpu_enable = cpu_enable_address_auth,
        },
        {
                .desc = "Address authentication (IMP DEF algorithm)",
                .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
-               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                .sys_reg = SYS_ID_AA64ISAR1_EL1,
                .sign = FTR_UNSIGNED,
                .field_pos = ID_AA64ISAR1_API_SHIFT,
                .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
                .matches = has_cpuid_feature,
-               .cpu_enable = cpu_enable_address_auth,
+       },
+       {
+               .capability = ARM64_HAS_ADDRESS_AUTH,
+               .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
+               .matches = has_address_auth,
        },
        {
                .desc = "Generic authentication (architected algorithm)",
@@ -1648,6 +1750,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
                .matches = has_cpuid_feature,
        },
+       {
+               .capability = ARM64_HAS_GENERIC_AUTH,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_generic_auth,
+       },
 #endif /* CONFIG_ARM64_PTR_AUTH */
 #ifdef CONFIG_ARM64_PSEUDO_NMI
        {
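
Note that the two meta-capability entries added in the previous hunk carry no register description at all; everything hinges on the .matches callback. A hedged sketch of such a callback-only table entry, using designated initializers the same way:

#include <stdbool.h>
#include <stdio.h>

struct capability {
        int id;
        bool (*matches)(const struct capability *cap, int scope);
};

static bool match_always(const struct capability *cap, int scope)
{
        (void)cap;
        (void)scope;
        return true;
}

/* Entry with only an id and a callback, mirroring the shape of the
 * ARM64_HAS_GENERIC_AUTH entry above. */
static const struct capability caps[] = {
        { .id = 1, .matches = match_always },
};

int main(void)
{
        printf("matches: %d\n", caps[0].matches(&caps[0], 0));
        return 0;
}
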
@@ -2013,10 +2120,8 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
  * Run through the list of capabilities to check for conflicts.
  * If the system has already detected a capability, take necessary
  * action on this CPU.
- *
- * Returns "false" on conflicts.
  */
-static bool verify_local_cpu_caps(u16 scope_mask)
+static void verify_local_cpu_caps(u16 scope_mask)
 {
        int i;
        bool cpu_has_cap, system_has_cap;
@@ -2061,10 +2166,12 @@ static bool verify_local_cpu_caps(u16 scope_mask)
                pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
                        smp_processor_id(), caps->capability,
                        caps->desc, system_has_cap, cpu_has_cap);
-               return false;
-       }
 
-       return true;
+               if (cpucap_panic_on_conflict(caps))
+                       cpu_panic_kernel();
+               else
+                       cpu_die_early();
+       }
 }
 
 /*
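
Since verify_local_cpu_caps() no longer returns a status, a conflict is now resolved on the spot: panic-on-conflict capabilities (boot CPU scope) bring the kernel down, anything else just parks the offending CPU. A compact userspace sketch of that branch (both handlers are stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void panic_kernel(void)  /* stand-in for cpu_panic_kernel() */
{
        fprintf(stderr, "fatal: boot-scope capability conflict\n");
        exit(1);
}

static void die_early(void)     /* stand-in for cpu_die_early() */
{
        fprintf(stderr, "parking conflicting CPU\n");
}

static void handle_conflict(bool panic_on_conflict)
{
        if (panic_on_conflict)
                panic_kernel();
        else
                die_early();
}

int main(void)
{
        handle_conflict(false); /* late-CPU conflict: survivable */
        return 0;
}
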
@@ -2074,12 +2181,8 @@ static bool verify_local_cpu_caps(u16 scope_mask)
 static void check_early_cpu_features(void)
 {
        verify_cpu_asid_bits();
-       /*
-        * Early features are used by the kernel already. If there
-        * is a conflict, we cannot proceed further.
-        */
-       if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
-               cpu_panic_kernel();
+
+       verify_local_cpu_caps(SCOPE_BOOT_CPU);
 }
 
 static void
@@ -2127,8 +2230,7 @@ static void verify_local_cpu_capabilities(void)
         * check_early_cpu_features(), as they need to be verified
         * on all secondary CPUs.
         */
-       if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
-               cpu_die_early();
+       verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
 
        verify_local_elf_hwcaps(arm64_elf_hwcaps);
 
@@ -2179,6 +2281,23 @@ bool this_cpu_has_cap(unsigned int n)
        return false;
 }
 
+/*
+ * This helper function is used in a narrow window when:
+ * - the system-wide safe registers have been set up for all SMP CPUs, and
+ * - the SYSTEM_FEATURE cpu_hwcaps may not yet have been set.
+ * In all other cases cpus_have_{const_}cap() should be used.
+ */
+static bool __system_matches_cap(unsigned int n)
+{
+       if (n < ARM64_NCAPS) {
+               const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+               if (cap)
+                       return cap->matches(cap, SCOPE_SYSTEM);
+       }
+       return false;
+}
+
 void cpu_set_feature(unsigned int num)
 {
        WARN_ON(num >= MAX_CPU_FEATURES);
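
__system_matches_cap() itself is a bounds-checked lookup in the cpu_hwcaps_ptrs table followed by a NULL-guarded indirect call. A minimal sketch of that defensive dispatch pattern:

#include <stdbool.h>
#include <stdio.h>

#define NCAPS 4

struct capability {
        bool (*matches)(const struct capability *cap, int scope);
};

static const struct capability *cap_ptrs[NCAPS];        /* cf. cpu_hwcaps_ptrs */

/* Reject out-of-range ids and unregistered slots before the indirect
 * call, as __system_matches_cap() does. */
static bool system_matches_cap(unsigned int n, int scope)
{
        if (n < NCAPS) {
                const struct capability *cap = cap_ptrs[n];

                if (cap)
                        return cap->matches(cap, scope);
        }
        return false;
}

int main(void)
{
        printf("unregistered cap: %d\n", system_matches_cap(2, 0));
        return 0;
}
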
@@ -2251,7 +2370,7 @@ void __init setup_cpu_features(void)
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-       return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
+       return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
 }
 
 static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)