]>
Commit | Line | Data |
---|---|---|
f4672752 MZ |
1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * Derived from arch/arm/kvm/reset.c | |
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License, version 2, as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | ||
22 | #include <linux/errno.h> | |
23 | #include <linux/kvm_host.h> | |
24 | #include <linux/kvm.h> | |
834bf887 | 25 | #include <linux/hw_breakpoint.h> |
f4672752 | 26 | |
003300de MZ |
27 | #include <kvm/arm_arch_timer.h> |
28 | ||
7665f3a8 | 29 | #include <asm/cpufeature.h> |
f4672752 MZ |
30 | #include <asm/cputype.h> |
31 | #include <asm/ptrace.h> | |
32 | #include <asm/kvm_arm.h> | |
67f69197 | 33 | #include <asm/kvm_asm.h> |
f4672752 | 34 | #include <asm/kvm_coproc.h> |
358b28f0 | 35 | #include <asm/kvm_emulate.h> |
67f69197 | 36 | #include <asm/kvm_mmu.h> |
f4672752 | 37 | |
/*
 * Maximum phys_shift (IPA size, in bits) supported for any VM on this
 * host.  Computed once by kvm_set_ipa_limit() below and reported to
 * userspace via KVM_CAP_ARM_VM_IPA_SIZE.
 */
static u32 kvm_ipa_limit;
/*
 * ARMv8 Reset Values
 */

/* AArch64 guest: enter at EL1h with all of D, A, I, F masked */
static const struct kvm_regs default_regs_reset = {
	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
			PSR_F_BIT | PSR_D_BIT),
};

/* AArch32 guest: enter in SVC mode with A, I, F masked */
static const struct kvm_regs default_regs_reset32 = {
	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};
53 | ||
0d854a60 MZ |
54 | static bool cpu_has_32bit_el1(void) |
55 | { | |
56 | u64 pfr0; | |
57 | ||
46823dd1 | 58 | pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
0d854a60 MZ |
59 | return !!(pfr0 & 0x20); |
60 | } | |
61 | ||
834bf887 | 62 | /** |
375bdd3b | 63 | * kvm_arch_vm_ioctl_check_extension |
834bf887 AB |
64 | * |
65 | * We currently assume that the number of HW registers is uniform | |
66 | * across all CPUs (see cpuinfo_sanity_check). | |
67 | */ | |
375bdd3b | 68 | int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
f4672752 MZ |
69 | { |
70 | int r; | |
71 | ||
72 | switch (ext) { | |
0d854a60 MZ |
73 | case KVM_CAP_ARM_EL1_32BIT: |
74 | r = cpu_has_32bit_el1(); | |
75 | break; | |
834bf887 AB |
76 | case KVM_CAP_GUEST_DEBUG_HW_BPS: |
77 | r = get_num_brps(); | |
78 | break; | |
79 | case KVM_CAP_GUEST_DEBUG_HW_WPS: | |
80 | r = get_num_wrps(); | |
81 | break; | |
808e7381 SZ |
82 | case KVM_CAP_ARM_PMU_V3: |
83 | r = kvm_arm_support_pmu_v3(); | |
84 | break; | |
be26b3a7 DG |
85 | case KVM_CAP_ARM_INJECT_SERROR_ESR: |
86 | r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN); | |
87 | break; | |
834bf887 | 88 | case KVM_CAP_SET_GUEST_DEBUG: |
f577f6c2 | 89 | case KVM_CAP_VCPU_ATTRIBUTES: |
834bf887 AB |
90 | r = 1; |
91 | break; | |
233a7cb2 SP |
92 | case KVM_CAP_ARM_VM_IPA_SIZE: |
93 | r = kvm_ipa_limit; | |
94 | break; | |
f4672752 MZ |
95 | default: |
96 | r = 0; | |
97 | } | |
98 | ||
99 | return r; | |
100 | } | |
101 | ||
/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 *
 * Return: 0 on success; -EINVAL if a 32-bit EL1 was requested but the host
 * lacks AArch32 EL1 support; otherwise the result of kvm_timer_vcpu_reset().
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_regs *cpu_reset;
	int ret = -EINVAL;
	bool loaded;

	preempt_disable();
	/* vcpu->cpu is -1 whenever the vcpu is not loaded on a physical CPU */
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			/* 32-bit guests need AArch32 support at EL1 */
			if (!cpu_has_32bit_el1())
				goto out;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;
		}

		break;
	}

	/* Reset core registers */
	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		/* One-shot: consume the PSCI-imposed reset state */
		vcpu->arch.reset_state.reset = false;
	}

	/* Reset PMU */
	kvm_pmu_vcpu_reset(vcpu);

	/* Default workaround setup is enabled (if supported) */
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}
5b6c6742 | 188 | |
0f62f0e9 SP |
189 | void kvm_set_ipa_limit(void) |
190 | { | |
191 | unsigned int ipa_max, pa_max, va_max, parange; | |
192 | ||
193 | parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7; | |
194 | pa_max = id_aa64mmfr0_parange_to_phys_shift(parange); | |
195 | ||
196 | /* Clamp the IPA limit to the PA size supported by the kernel */ | |
197 | ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max; | |
198 | /* | |
199 | * Since our stage2 table is dependent on the stage1 page table code, | |
200 | * we must always honor the following condition: | |
201 | * | |
202 | * Number of levels in Stage1 >= Number of levels in Stage2. | |
203 | * | |
204 | * So clamp the ipa limit further down to limit the number of levels. | |
205 | * Since we can concatenate upto 16 tables at entry level, we could | |
206 | * go upto 4bits above the maximum VA addressible with the current | |
207 | * number of levels. | |
208 | */ | |
209 | va_max = PGDIR_SHIFT + PAGE_SHIFT - 3; | |
210 | va_max += 4; | |
211 | ||
212 | if (va_max < ipa_max) | |
213 | ipa_max = va_max; | |
214 | ||
215 | /* | |
216 | * If the final limit is lower than the real physical address | |
217 | * limit of the CPUs, report the reason. | |
218 | */ | |
219 | if (ipa_max < pa_max) | |
220 | pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n", | |
221 | (va_max < pa_max) ? "Virtual" : "Physical"); | |
222 | ||
223 | WARN(ipa_max < KVM_PHYS_SHIFT, | |
224 | "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max); | |
225 | kvm_ipa_limit = ipa_max; | |
226 | kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit); | |
227 | } | |
228 | ||
7665f3a8 SP |
229 | /* |
230 | * Configure the VTCR_EL2 for this VM. The VTCR value is common | |
231 | * across all the physical CPUs on the system. We use system wide | |
232 | * sanitised values to fill in different fields, except for Hardware | |
233 | * Management of Access Flags. HA Flag is set unconditionally on | |
234 | * all CPUs, as it is safe to run with or without the feature and | |
235 | * the bit is RES0 on CPUs that don't support it. | |
236 | */ | |
bca607eb | 237 | int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) |
5b6c6742 | 238 | { |
7665f3a8 SP |
239 | u64 vtcr = VTCR_EL2_FLAGS; |
240 | u32 parange, phys_shift; | |
58b3efc8 | 241 | u8 lvls; |
7665f3a8 | 242 | |
233a7cb2 | 243 | if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) |
5b6c6742 | 244 | return -EINVAL; |
7665f3a8 | 245 | |
233a7cb2 SP |
246 | phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); |
247 | if (phys_shift) { | |
248 | if (phys_shift > kvm_ipa_limit || | |
249 | phys_shift < 32) | |
250 | return -EINVAL; | |
251 | } else { | |
252 | phys_shift = KVM_PHYS_SHIFT; | |
253 | } | |
254 | ||
7665f3a8 SP |
255 | parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; |
256 | if (parange > ID_AA64MMFR0_PARANGE_MAX) | |
257 | parange = ID_AA64MMFR0_PARANGE_MAX; | |
258 | vtcr |= parange << VTCR_EL2_PS_SHIFT; | |
259 | ||
7665f3a8 | 260 | vtcr |= VTCR_EL2_T0SZ(phys_shift); |
58b3efc8 SP |
261 | /* |
262 | * Use a minimum 2 level page table to prevent splitting | |
263 | * host PMD huge pages at stage2. | |
264 | */ | |
265 | lvls = stage2_pgtable_levels(phys_shift); | |
266 | if (lvls < 2) | |
267 | lvls = 2; | |
268 | vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); | |
7665f3a8 SP |
269 | |
270 | /* | |
271 | * Enable the Hardware Access Flag management, unconditionally | |
272 | * on all CPUs. The features is RES0 on CPUs without the support | |
273 | * and must be ignored by the CPUs. | |
274 | */ | |
275 | vtcr |= VTCR_EL2_HA; | |
276 | ||
277 | /* Set the vmid bits */ | |
278 | vtcr |= (kvm_get_vmid_bits() == 16) ? | |
279 | VTCR_EL2_VS_16BIT : | |
280 | VTCR_EL2_VS_8BIT; | |
281 | kvm->arch.vtcr = vtcr; | |
5b6c6742 SP |
282 | return 0; |
283 | } |