]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/arm64/kvm/hyp/debug-sr.c
arm64: KVM: Save/restore the host SPE state when entering/leaving a VM
[mirror_ubuntu-artful-kernel.git] / arch / arm64 / kvm / hyp / debug-sr.c
1 /*
2 * Copyright (C) 2015 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include <linux/compiler.h>
19 #include <linux/kvm_host.h>
20
21 #include <asm/debug-monitors.h>
22 #include <asm/kvm_asm.h>
23 #include <asm/kvm_hyp.h>
24
/*
 * Accessors for the numbered hardware debug registers: the register
 * name and index are token-pasted into a sysreg name, e.g.
 * read_debug(dbgbcr, 3) expands to read_sysreg(dbgbcr3_el1).
 */
#define read_debug(r,n)		read_sysreg(r##n##_el1)
#define write_debug(v,r,n)	write_sysreg(v, r##n##_el1)
27
/*
 * Save debug registers reg0..reg'nr' into ptr[0..nr].
 *
 * The switch deliberately falls through on every case: entering at
 * case 'nr' saves register nr and then all lower-numbered registers
 * down to 0. Do not add break statements.
 */
#define save_debug(ptr,reg,nr)						\
	switch (nr) {							\
	case 15:	ptr[15] = read_debug(reg, 15);			\
	case 14:	ptr[14] = read_debug(reg, 14);			\
	case 13:	ptr[13] = read_debug(reg, 13);			\
	case 12:	ptr[12] = read_debug(reg, 12);			\
	case 11:	ptr[11] = read_debug(reg, 11);			\
	case 10:	ptr[10] = read_debug(reg, 10);			\
	case 9:		ptr[9] = read_debug(reg, 9);			\
	case 8:		ptr[8] = read_debug(reg, 8);			\
	case 7:		ptr[7] = read_debug(reg, 7);			\
	case 6:		ptr[6] = read_debug(reg, 6);			\
	case 5:		ptr[5] = read_debug(reg, 5);			\
	case 4:		ptr[4] = read_debug(reg, 4);			\
	case 3:		ptr[3] = read_debug(reg, 3);			\
	case 2:		ptr[2] = read_debug(reg, 2);			\
	case 1:		ptr[1] = read_debug(reg, 1);			\
	default:	ptr[0] = read_debug(reg, 0);			\
	}
47
/*
 * Restore debug registers reg0..reg'nr' from ptr[0..nr].
 *
 * Mirror image of save_debug(): the intentional fallthrough writes
 * register nr and every lower-numbered register down to 0.
 */
#define restore_debug(ptr,reg,nr)					\
	switch (nr) {							\
	case 15:	write_debug(ptr[15], reg, 15);			\
	case 14:	write_debug(ptr[14], reg, 14);			\
	case 13:	write_debug(ptr[13], reg, 13);			\
	case 12:	write_debug(ptr[12], reg, 12);			\
	case 11:	write_debug(ptr[11], reg, 11);			\
	case 10:	write_debug(ptr[10], reg, 10);			\
	case 9:		write_debug(ptr[9], reg, 9);			\
	case 8:		write_debug(ptr[8], reg, 8);			\
	case 7:		write_debug(ptr[7], reg, 7);			\
	case 6:		write_debug(ptr[6], reg, 6);			\
	case 5:		write_debug(ptr[5], reg, 5);			\
	case 4:		write_debug(ptr[4], reg, 4);			\
	case 3:		write_debug(ptr[3], reg, 3);			\
	case 2:		write_debug(ptr[2], reg, 2);			\
	case 1:		write_debug(ptr[1], reg, 1);			\
	default:	write_debug(ptr[0], reg, 0);			\
	}
67
/*
 * Statistical Profiling Extension (SPE) registers, spelled out with
 * their raw sys_reg() encodings so the file builds with toolchains
 * that do not know these register names.
 */
#define PMSCR_EL1		sys_reg(3, 0, 9, 9, 0)

#define PMBLIMITR_EL1		sys_reg(3, 0, 9, 10, 0)
#define PMBLIMITR_EL1_E		BIT(0)	/* profiling buffer enable */

#define PMBIDR_EL1		sys_reg(3, 0, 9, 10, 7)
#define PMBIDR_EL1_P		BIT(4)	/* buffer owned by a higher EL */

/* PSB CSYNC, encoded as a hint so older assemblers accept it */
#define psb_csync()		asm volatile("hint #17")
77
static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
{
	/*
	 * VHE: deliberately a no-op, the host SPE state is left
	 * untouched. (The vcpu can run, but it can't hide.)
	 */
}
82
/*
 * Non-VHE: stop host SPE profiling before entering the guest.
 *
 * *pmscr_el1 is only written when profiling was actually enabled;
 * __debug_restore_spe() treats a zero value as "nothing to restore".
 * NOTE(review): this presumes the caller's storage starts out zeroed —
 * verify against the kvm_vcpu initialisation path.
 */
static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
{
	u64 reg;

	/* SPE present on this CPU? */
	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
						  ID_AA64DFR0_PMSVER_SHIFT))
		return;

	/* Yes; is it owned by EL3? */
	reg = read_sysreg_s(PMBIDR_EL1);
	if (reg & PMBIDR_EL1_P)
		return;

	/* No; is the host actually using the thing? */
	reg = read_sysreg_s(PMBLIMITR_EL1);
	if (!(reg & PMBLIMITR_EL1_E))
		return;

	/*
	 * Yes; save the control register and disable data generation.
	 * The isb() makes sure the PMSCR_EL1 write has taken effect
	 * before the buffer is drained below.
	 */
	*pmscr_el1 = read_sysreg_s(PMSCR_EL1);
	write_sysreg_s(0, PMSCR_EL1);
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
	dsb(nsh);
}
111
/* Select the nVHE or VHE variant at runtime via an alternatives patch */
static hyp_alternate_select(__debug_save_spe,
			    __debug_save_spe_nvhe, __debug_save_spe_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
115
/*
 * Re-enable host SPE profiling on guest exit, if it was active.
 * A zero pmscr_el1 means __debug_save_spe_nvhe() found nothing to
 * disable, so there is nothing to restore either.
 */
static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_s(pmscr_el1, PMSCR_EL1);
}
127
128 void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
129 struct kvm_guest_debug_arch *dbg,
130 struct kvm_cpu_context *ctxt)
131 {
132 u64 aa64dfr0;
133 int brps, wrps;
134
135 if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
136 return;
137
138 aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
139 brps = (aa64dfr0 >> 12) & 0xf;
140 wrps = (aa64dfr0 >> 20) & 0xf;
141
142 save_debug(dbg->dbg_bcr, dbgbcr, brps);
143 save_debug(dbg->dbg_bvr, dbgbvr, brps);
144 save_debug(dbg->dbg_wcr, dbgwcr, wrps);
145 save_debug(dbg->dbg_wvr, dbgwvr, wrps);
146
147 ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
148 }
149
150 void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
151 struct kvm_guest_debug_arch *dbg,
152 struct kvm_cpu_context *ctxt)
153 {
154 u64 aa64dfr0;
155 int brps, wrps;
156
157 if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
158 return;
159
160 aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
161
162 brps = (aa64dfr0 >> 12) & 0xf;
163 wrps = (aa64dfr0 >> 20) & 0xf;
164
165 restore_debug(dbg->dbg_bcr, dbgbcr, brps);
166 restore_debug(dbg->dbg_bvr, dbgbvr, brps);
167 restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
168 restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
169
170 write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
171 }
172
173 void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
174 {
175 /* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
176 * a full save/restore cycle. */
177 if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
178 (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
179 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
180
181 __debug_save_state(vcpu, &vcpu->arch.host_debug_state.regs,
182 kern_hyp_va(vcpu->arch.host_cpu_context));
183 __debug_save_spe()(&vcpu->arch.host_debug_state.pmscr_el1);
184 }
185
186 void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
187 {
188 __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
189 __debug_restore_state(vcpu, &vcpu->arch.host_debug_state.regs,
190 kern_hyp_va(vcpu->arch.host_cpu_context));
191
192 if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
193 vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
194 }
195
/*
 * Return the current MDCR_EL2 value; lives in the hyp text section
 * (presumably so it can be invoked while running at EL2 — confirm
 * against the callers of this hypercall).
 */
u32 __hyp_text __kvm_get_mdcr_el2(void)
{
	return read_sysreg(mdcr_el2);
}