]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blob - arch/powerpc/kvm/e500_emulate.c
Merge branch 'drm-fixes-5.0' of git://people.freedesktop.org/~agd5f/linux into drm...
[mirror_ubuntu-hirsute-kernel.git] / arch / powerpc / kvm / e500_emulate.c
1 /*
2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Yu Liu, <yu.liu@freescale.com>
5 *
6 * Description:
7 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
8 * by Hollis Blanchard <hollisb@us.ibm.com>.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 */
14
15 #include <asm/kvm_ppc.h>
16 #include <asm/disassemble.h>
17 #include <asm/dbell.h>
18 #include <asm/reg_booke.h>
19
20 #include "booke.h"
21 #include "e500.h"
22
23 #define XOP_DCBTLS 166
24 #define XOP_MSGSND 206
25 #define XOP_MSGCLR 238
26 #define XOP_MFTMR 366
27 #define XOP_TLBIVAX 786
28 #define XOP_TLBSX 914
29 #define XOP_TLBRE 946
30 #define XOP_TLBWE 978
31 #define XOP_TLBILX 18
32 #define XOP_EHPRIV 270
33
34 #ifdef CONFIG_KVM_E500MC
35 static int dbell2prio(ulong param)
36 {
37 int msg = param & PPC_DBELL_TYPE_MASK;
38 int prio = -1;
39
40 switch (msg) {
41 case PPC_DBELL_TYPE(PPC_DBELL):
42 prio = BOOKE_IRQPRIO_DBELL;
43 break;
44 case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
45 prio = BOOKE_IRQPRIO_DBELL_CRIT;
46 break;
47 default:
48 break;
49 }
50
51 return prio;
52 }
53
54 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
55 {
56 ulong param = vcpu->arch.regs.gpr[rb];
57 int prio = dbell2prio(param);
58
59 if (prio < 0)
60 return EMULATE_FAIL;
61
62 clear_bit(prio, &vcpu->arch.pending_exceptions);
63 return EMULATE_DONE;
64 }
65
66 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
67 {
68 ulong param = vcpu->arch.regs.gpr[rb];
69 int prio = dbell2prio(rb);
70 int pir = param & PPC_DBELL_PIR_MASK;
71 int i;
72 struct kvm_vcpu *cvcpu;
73
74 if (prio < 0)
75 return EMULATE_FAIL;
76
77 kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
78 int cpir = cvcpu->arch.shared->pir;
79 if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
80 set_bit(prio, &cvcpu->arch.pending_exceptions);
81 kvm_vcpu_kick(cvcpu);
82 }
83 }
84
85 return EMULATE_DONE;
86 }
87 #endif
88
89 static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
90 unsigned int inst, int *advance)
91 {
92 int emulated = EMULATE_DONE;
93
94 switch (get_oc(inst)) {
95 case EHPRIV_OC_DEBUG:
96 run->exit_reason = KVM_EXIT_DEBUG;
97 run->debug.arch.address = vcpu->arch.regs.nip;
98 run->debug.arch.status = 0;
99 kvmppc_account_exit(vcpu, DEBUG_EXITS);
100 emulated = EMULATE_EXIT_USER;
101 *advance = 0;
102 break;
103 default:
104 emulated = EMULATE_FAIL;
105 }
106 return emulated;
107 }
108
109 static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
110 {
111 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
112
113 /* Always fail to lock the cache */
114 vcpu_e500->l1csr0 |= L1CSR0_CUL;
115 return EMULATE_DONE;
116 }
117
118 static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
119 int rt)
120 {
121 /* Expose one thread per vcpu */
122 if (get_tmrn(inst) == TMRN_TMCFG0) {
123 kvmppc_set_gpr(vcpu, rt,
124 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
125 return EMULATE_DONE;
126 }
127
128 return EMULATE_FAIL;
129 }
130
/*
 * Top-level e500 instruction emulation hook.  Decodes opcode-31 (X-form)
 * privileged instructions that trapped into KVM and dispatches them to
 * the handlers above; anything not recognized here is passed on to the
 * generic BookE emulator.
 *
 * @run:     userspace exchange area (used by the ehpriv debug exit)
 * @vcpu:    vcpu whose instruction is being emulated
 * @inst:    raw 32-bit instruction image
 * @advance: out-flag; cleared by handlers that must not advance the NIP
 *
 * Returns an EMULATE_* status from the selected handler.
 */
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			/* Cache locking always reported as failed. */
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			/* TLB search by effective address (ra|0 + rb). */
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			/* Low two bits of the RT field select the
			 * invalidation type (all/PID/address). */
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
							   advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	/* Not an e500-specific instruction: try the common BookE path. */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
207
/*
 * Emulate a guest mtspr to an e500-specific SPR.  Unknown SPRs fall
 * through to the generic BookE mtspr emulation.
 *
 * Returns EMULATE_DONE on success, or EMULATE_FAIL for writes we refuse
 * (nonzero PID1/PID2) or SPRs nobody emulates.
 */
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		/* Only PID0 is virtualized; refuse any nonzero PID1/PID2. */
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		/* MAS3 is the low 32 bits of the combined mas7_3 image. */
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		/* MAS7 is the high 32 bits of the combined mas7_3 image. */
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		/* Never let the guest invalidate or flash-clear the host
		 * data cache: mask out DCFI/CLFC from the shadow value. */
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		/* Likewise for instruction-cache invalidate/lock-flash. */
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;

	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	/* IVOR32/33 carry AltiVec meanings when SPE is not configured. */
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
324
/*
 * Emulate a guest mfspr from an e500-specific SPR, storing the result
 * in *spr_val.  Unknown SPRs fall through to the generic BookE mfspr
 * emulation.
 *
 * Returns EMULATE_DONE on success, or EMULATE_FAIL for MMU-v2-only
 * registers (TLBnPS, EPTCFG) on hardware without that feature.
 */
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		/* Low half of the combined mas7_3 image. */
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		/* High half of the combined mas7_3 image. */
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		/* TLBnPS only exist on MMU architecture v2. */
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		/* Reads as zero: no flush ever appears in progress. */
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	/* IVOR32/33 carry AltiVec meanings when SPE is not configured. */
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
456