v8 is capable of invalidating Stage-2 by IPA, but v7 is not.
Change kvm_tlb_flush_vmid() to take an IPA parameter. The v7
invalidation code ignores it for now and keeps nuking the whole
TLB, as it always did.
This allows v8 to implement a more optimized strategy.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
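To make the intended split concrete, here is a rough sketch (not part of
this patch) of the two backends that can now sit behind the same
prototype. The example_* names below are invented purely for illustration
and do not exist in the tree:

/* hypothetical helpers, for illustration only */
extern void example_flush_guest_tlb_all(struct kvm *kvm);
extern void example_flush_stage2_entry_by_ipa(struct kvm *kvm, phys_addr_t ipa);

/*
 * v7-style backend, i.e. what this patch implements: the ipa argument
 * is accepted but ignored, and the whole guest TLB is nuked as before
 * (TLBIALLIS under the guest's VMID).
 */
void example_v7_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	example_flush_guest_tlb_all(kvm);		/* hypothetical helper */
}

/*
 * Hypothetical v8-style backend enabled by this change: only the
 * stage-2 mapping for the given IPA needs to go, e.g. via TLBI IPAS2E1IS.
 */
void example_v8_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	example_flush_stage2_entry_by_ipa(kvm, ipa);	/* hypothetical helper */
}

Callers always pass the IPA they just touched, so the v7 backend can stay
trivial while a v8 backend is free to narrow the invalidation.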
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
/********************************************************************
* Flush per-VMID TLBs
*
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
*
* We rely on the hardware to broadcast the TLB invalidation to all CPUs
* inside the inner-shareable domain (which is the case for all v7
* implementations). If we come across a non-IS SMP implementation, we'll
* have to use an IPI based mechanism. Until then, we stick to the simple
* hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
*/
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
push {r2, r3}
add r0, r0, #KVM_VTTBR
pop {r2, r3}
bx lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)
/********************************************************************
* Flush TLBs and instruction caches of all CPUs inside the inner-shareable
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-static void kvm_tlb_flush_vmid(struct kvm *kvm)
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
- kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
old_pte = *pte;
kvm_set_pte(pte, *new_pte);
if (pte_present(old_pte))
- kvm_tlb_flush_vmid(kvm);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
else
get_page(virt_to_page(pte));
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
unmap_stage2_range(kvm, gpa, PAGE_SIZE);
- kvm_tlb_flush_vmid(kvm);
+ kvm_tlb_flush_vmid_ipa(kvm, gpa);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)