KVM: arm/arm64: vgic-its: Take the srcu lock when writing to guest memory
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2a796fadae762846c1f314016c7c3..8251077d3ea57dc91b06f8b4a85ce4909f6bcd97 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)         ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)                                             \
+       ({                                                              \
+               typeof(s) *addr;                                        \
+               asm("adrp       %0, %1\n"                               \
+                   "add        %0, %0, :lo12:%1\n"                     \
+                   : "=r" (addr) : "S" (&s));                          \
+               addr;                                                   \
+       })
+
 /*
  * We currently only support a 40bit IPA.
  */
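
The hyp_symbol_addr() macro added above generates the symbol's address with an adrp/add pair relative to the current PC, so the result is valid under whichever mapping the code happens to be running in, rather than the link-time kernel VA that a plain address-of expression would give. A minimal usage sketch, with a made-up symbol and helper name that are not part of this file:

extern u64 example_hyp_counter;	/* hypothetical symbol, for illustration only */

static u64 *example_counter_addr(void)
{
	/*
	 * &example_hyp_counter would resolve to the kernel VA, which is
	 * wrong once this code runs at EL2 under the HYP mapping; the
	 * adrp/add sequence emitted by hyp_symbol_addr() yields an
	 * address relative to wherever the code actually executes.
	 */
	return hyp_symbol_addr(example_hyp_counter);
}
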
@@ -309,5 +329,94 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+                                      const void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_write_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+       struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+       void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+       if (data->fn) {
+               vect = __bp_harden_hyp_vecs_start +
+                      data->hyp_vectors_slot * SZ_2K;
+
+               if (!has_vhe())
+                       vect = lm_alias(vect);
+       }
+
+       return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+       return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+                                  kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+                                  PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+       return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+       int cpu, err;
+
+       for_each_possible_cpu(cpu) {
+               u64 *ptr;
+
+               ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+               err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+       return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
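
As the subject line says, the point of the kvm_read_guest_lock()/kvm_write_guest_lock() wrappers is that callers such as the vgic-its code access guest memory from contexts that do not already hold the kvm->srcu read lock protecting the memslots. A minimal sketch of the intended calling pattern, with an illustrative caller that is not taken from this patch:

/*
 * Hypothetical caller, for illustration only: write one 64-bit table
 * entry to guest RAM.  kvm_write_guest_lock() takes the kvm->srcu read
 * lock around kvm_write_guest(), so the memslot lookup is safe even
 * though this caller is not in an SRCU read-side critical section of
 * its own.
 */
static int example_save_table_entry(struct kvm *kvm, gpa_t gpa, u64 entry)
{
	return kvm_write_guest_lock(kvm, gpa, &entry, sizeof(entry));
}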