arch/arm64/kvm/reset.c
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
static const struct kvm_regs default_regs_reset = {
        .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
                        PSR_F_BIT | PSR_D_BIT),
};

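/*
 * AArch32 guests reset into Supervisor mode with the A, I and F bits set
 * (asynchronous aborts, IRQs and FIQs masked); there is no AArch32
 * counterpart of the AArch64 D bit.
 */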
static const struct kvm_regs default_regs_reset32 = {
        .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
                        PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};

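/*
 * ID_AA64PFR0_EL1.EL1 (bits [7:4]) reads as 0b0010 when EL1 supports
 * AArch32 in addition to AArch64, so testing bit 5 (0x20) of the
 * sanitised register value is enough to detect 32-bit EL1 support.
 */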
static bool cpu_has_32bit_el1(void)
{
        u64 pfr0;

        pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
        return !!(pfr0 & 0x20);
}

/**
 * kvm_arch_vm_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 */
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_ARM_EL1_32BIT:
                r = cpu_has_32bit_el1();
                break;
        case KVM_CAP_GUEST_DEBUG_HW_BPS:
                r = get_num_brps();
                break;
        case KVM_CAP_GUEST_DEBUG_HW_WPS:
                r = get_num_wrps();
                break;
        case KVM_CAP_ARM_PMU_V3:
                r = kvm_arm_support_pmu_v3();
                break;
        case KVM_CAP_ARM_INJECT_SERROR_ESR:
                r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
                break;
        case KVM_CAP_SET_GUEST_DEBUG:
        case KVM_CAP_VCPU_ATTRIBUTES:
                r = 1;
                break;
        case KVM_CAP_ARM_VM_IPA_SIZE:
                r = kvm_ipa_limit;
                break;
        default:
                r = 0;
        }

        return r;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
        const struct kvm_regs *cpu_reset;

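        /*
         * Only the default case is needed here: all supported targets use
         * the same reset state, chosen by whether the vCPU was created with
         * the KVM_ARM_VCPU_EL1_32BIT feature.
         */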
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
                        if (!cpu_has_32bit_el1())
                                return -EINVAL;
                        cpu_reset = &default_regs_reset32;
                } else {
                        cpu_reset = &default_regs_reset;
                }

                break;
        }

        /* Reset core registers */
        memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

        /* Reset system registers */
        kvm_reset_sys_regs(vcpu);

        /* Reset PMU */
        kvm_pmu_vcpu_reset(vcpu);

        /* Default workaround setup is enabled (if supported) */
        if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

        /* Reset timer */
        return kvm_timer_vcpu_reset(vcpu);
}

void kvm_set_ipa_limit(void)
{
        unsigned int ipa_max, pa_max, va_max, parange;

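        /*
         * ID_AA64MMFR0_EL1.PARange occupies bits [3:0]; the architecturally
         * defined values (0b0000-0b0110) fit in the low three bits, which is
         * presumably why a 0x7 mask is used here.
         */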
        parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
        pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);

        /* Clamp the IPA limit to the PA size supported by the kernel */
        ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
        /*
         * Since our stage2 table is dependent on the stage1 page table code,
         * we must always honor the following condition:
         *
         *  Number of levels in Stage1 >= Number of levels in Stage2.
         *
         * So clamp the ipa limit further down to limit the number of levels.
         * Since we can concatenate up to 16 tables at the entry level, we
         * could go up to 4 bits above the maximum VA addressable with the
         * current number of levels.
         */
        va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
        va_max += 4;

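        /*
         * Worked example, assuming 4K pages with a 48-bit VA (four stage-1
         * levels): PGDIR_SHIFT is 39, so va_max = 39 + 12 - 3 = 48, plus 4
         * bits for the 16 concatenated entry-level tables gives 52. The
         * final limit is then min(pa_max, PHYS_MASK_SHIFT, 52).
         */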
        if (va_max < ipa_max)
                ipa_max = va_max;

        /*
         * If the final limit is lower than the real physical address
         * limit of the CPUs, report the reason.
         */
        if (ipa_max < pa_max)
                pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
                        (va_max < pa_max) ? "Virtual" : "Physical");

        WARN(ipa_max < KVM_PHYS_SHIFT,
             "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
        kvm_ipa_limit = ipa_max;
        kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
}

/*
 * Configure the VTCR_EL2 for this VM. The VTCR value is common
 * across all the physical CPUs on the system. We use system wide
 * sanitised values to fill in different fields, except for Hardware
 * Management of Access Flags. HA Flag is set unconditionally on
 * all CPUs, as it is safe to run with or without the feature and
 * the bit is RES0 on CPUs that don't support it.
 */
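/*
 * For reference, a sketch of the userspace side: the IPA size of a VM is
 * picked with the machine type argument of KVM_CREATE_VM, and the host
 * limit can be queried beforehand, e.g.
 *
 *      max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
 *      vm_fd = ioctl(kvm_fd, KVM_CREATE_VM,
 *                    max ? KVM_VM_TYPE_ARM_IPA_SIZE(max) : 0);
 *
 * A type of 0 keeps the default KVM_PHYS_SHIFT (40 bits).
 */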
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
        u64 vtcr = VTCR_EL2_FLAGS;
        u32 parange, phys_shift;
        u8 lvls;

        if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
                return -EINVAL;

        phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
        if (phys_shift) {
                if (phys_shift > kvm_ipa_limit ||
                    phys_shift < 32)
                        return -EINVAL;
        } else {
                phys_shift = KVM_PHYS_SHIFT;
        }

        parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
        if (parange > ID_AA64MMFR0_PARANGE_MAX)
                parange = ID_AA64MMFR0_PARANGE_MAX;
        vtcr |= parange << VTCR_EL2_PS_SHIFT;

        vtcr |= VTCR_EL2_T0SZ(phys_shift);
        /*
         * Use a minimum of 2 page table levels to prevent splitting
         * host PMD huge pages at stage2.
         */
        lvls = stage2_pgtable_levels(phys_shift);
        if (lvls < 2)
                lvls = 2;
        vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

        /*
         * Enable the Hardware Access Flag management, unconditionally
         * on all CPUs. The bit is RES0 on CPUs that don't support the
         * feature and is simply ignored by them.
         */
        vtcr |= VTCR_EL2_HA;

        /* Set the vmid bits */
        vtcr |= (kvm_get_vmid_bits() == 16) ?
                VTCR_EL2_VS_16BIT :
                VTCR_EL2_VS_8BIT;
        kvm->arch.vtcr = vtcr;
        return 0;
}