]>
Commit | Line | Data |
---|---|---|
06282fd2 MZ |
1 | /* |
2 | * Copyright (C) 2012-2015 - ARM Ltd | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | */ | |
17 | ||
18 | #include <linux/compiler.h> | |
19 | #include <linux/irqchip/arm-gic.h> | |
20 | #include <linux/kvm_host.h> | |
21 | ||
bf8feb39 | 22 | #include <asm/kvm_emulate.h> |
13720a56 | 23 | #include <asm/kvm_hyp.h> |
06282fd2 | 24 | |
/*
 * Save the "empty LR" status bits (GICH_ELRSR0/1) into the shadow
 * cpu_if->vgic_elrsr, so that save_lrs() can skip the expensive MMIO
 * read of any List Register that holds no state.
 */
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	/* global state pointer must be translated to a HYP VA before use */
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	/* ELRSR1 only covers LRs 32-63; skip the read when they don't exist */
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

	/* combine both 32-bit halves into the 64-bit shadow copy */
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
}
39 | ||
f8cfbce1 MZ |
40 | static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) |
41 | { | |
42 | struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; | |
f8cfbce1 | 43 | int i; |
00dafa0f | 44 | u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; |
f8cfbce1 | 45 | |
00dafa0f | 46 | for (i = 0; i < used_lrs; i++) { |
4d3afc9b | 47 | if (cpu_if->vgic_elrsr & (1UL << i)) |
f8cfbce1 | 48 | cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; |
4d3afc9b CD |
49 | else |
50 | cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); | |
f8cfbce1 | 51 | |
cc1daf0b | 52 | writel_relaxed(0, base + GICH_LR0 + (i * 4)); |
f8cfbce1 MZ |
53 | } |
54 | } | |
55 | ||
/* vcpu is already in the HYP VA space */
/*
 * Save the GICv2 hypervisor interface state (APR, ELRSR, LRs) for this
 * vcpu into its shadow vgic_v2 structure and disable the interface.
 */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/* nothing to save if the vGIC control interface isn't mapped */
	if (!base)
		return;

	if (used_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		/* ELRSR must be captured before the LRs so save_lrs()
		 * can skip the empty ones */
		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);

		/* turn the hypervisor interface off until the next restore */
		writel_relaxed(0, base + GICH_HCR);
	} else {
		/* no LRs in use: synthesize "all LRs empty" in software */
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_apr = 0;
	}
}
80 | ||
/* vcpu is already in the HYP VA space */
/*
 * Restore the GICv2 hypervisor interface state (HCR, APR, LRs) for this
 * vcpu from its shadow vgic_v2 structure.
 */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int i;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/* nothing to restore if the vGIC control interface isn't mapped */
	if (!base)
		return;

	/* only re-enable the interface when there is state to present;
	 * an idle interface (no used LRs) is simply left disabled */
	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}
fb5ee369 MZ |
103 | |
#ifdef CONFIG_ARM64
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 * guest.
 *
 * Called on a data abort taken while the guest was accessing the GICV
 * (virtual CPU interface) region; emulates the access by forwarding it
 * to the real GICV mapping at hyp.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address: the fault IPA is page-aligned, the
	 * low 12 bits come from the faulting VA in HFAR */
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	/* hyp VA of the host's GICV mapping, plus the guest's offset
	 * within the GICV region */
	addr  = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		/* replay the guest's write against the real GICV */
		u32 data = vcpu_data_guest_to_host(vcpu,
						   vcpu_get_reg(vcpu, rd),
						   sizeof(u32));
		writel_relaxed(data, addr);
	} else {
		/* perform the read and stuff the result into Rd */
		u32 data = readl_relaxed(addr);
		vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
							       sizeof(u32)));
	}

	return 1;
}
#endif