/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"
23 #define XOP_DCBTLS 166
24 #define XOP_MSGSND 206
25 #define XOP_MSGCLR 238
27 #define XOP_TLBIVAX 786
32 #define XOP_EHPRIV 270
34 #ifdef CONFIG_KVM_E500MC
35 static int dbell2prio(ulong param
)
37 int msg
= param
& PPC_DBELL_TYPE_MASK
;
41 case PPC_DBELL_TYPE(PPC_DBELL
):
42 prio
= BOOKE_IRQPRIO_DBELL
;
44 case PPC_DBELL_TYPE(PPC_DBELL_CRIT
):
45 prio
= BOOKE_IRQPRIO_DBELL_CRIT
;
54 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu
*vcpu
, int rb
)
56 ulong param
= vcpu
->arch
.regs
.gpr
[rb
];
57 int prio
= dbell2prio(param
);
62 clear_bit(prio
, &vcpu
->arch
.pending_exceptions
);
66 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu
*vcpu
, int rb
)
68 ulong param
= vcpu
->arch
.regs
.gpr
[rb
];
69 int prio
= dbell2prio(rb
);
70 int pir
= param
& PPC_DBELL_PIR_MASK
;
72 struct kvm_vcpu
*cvcpu
;
77 kvm_for_each_vcpu(i
, cvcpu
, vcpu
->kvm
) {
78 int cpir
= cvcpu
->arch
.shared
->pir
;
79 if ((param
& PPC_DBELL_MSG_BRDCAST
) || (cpir
== pir
)) {
80 set_bit(prio
, &cvcpu
->arch
.pending_exceptions
);
89 static int kvmppc_e500_emul_ehpriv(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
90 unsigned int inst
, int *advance
)
92 int emulated
= EMULATE_DONE
;
94 switch (get_oc(inst
)) {
96 run
->exit_reason
= KVM_EXIT_DEBUG
;
97 run
->debug
.arch
.address
= vcpu
->arch
.regs
.nip
;
98 run
->debug
.arch
.status
= 0;
99 kvmppc_account_exit(vcpu
, DEBUG_EXITS
);
100 emulated
= EMULATE_EXIT_USER
;
104 emulated
= EMULATE_FAIL
;
109 static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu
*vcpu
)
111 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
113 /* Always fail to lock the cache */
114 vcpu_e500
->l1csr0
|= L1CSR0_CUL
;
118 static int kvmppc_e500_emul_mftmr(struct kvm_vcpu
*vcpu
, unsigned int inst
,
121 /* Expose one thread per vcpu */
122 if (get_tmrn(inst
) == TMRN_TMCFG0
) {
123 kvmppc_set_gpr(vcpu
, rt
,
124 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT
));
131 int kvmppc_core_emulate_op_e500(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
132 unsigned int inst
, int *advance
)
134 int emulated
= EMULATE_DONE
;
135 int ra
= get_ra(inst
);
136 int rb
= get_rb(inst
);
137 int rt
= get_rt(inst
);
140 switch (get_op(inst
)) {
142 switch (get_xop(inst
)) {
145 emulated
= kvmppc_e500_emul_dcbtls(vcpu
);
148 #ifdef CONFIG_KVM_E500MC
150 emulated
= kvmppc_e500_emul_msgsnd(vcpu
, rb
);
154 emulated
= kvmppc_e500_emul_msgclr(vcpu
, rb
);
159 emulated
= kvmppc_e500_emul_tlbre(vcpu
);
163 emulated
= kvmppc_e500_emul_tlbwe(vcpu
);
167 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
168 emulated
= kvmppc_e500_emul_tlbsx(vcpu
, ea
);
173 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
174 emulated
= kvmppc_e500_emul_tlbilx(vcpu
, type
, ea
);
179 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
180 emulated
= kvmppc_e500_emul_tlbivax(vcpu
, ea
);
184 emulated
= kvmppc_e500_emul_mftmr(vcpu
, inst
, rt
);
188 emulated
= kvmppc_e500_emul_ehpriv(run
, vcpu
, inst
,
193 emulated
= EMULATE_FAIL
;
199 emulated
= EMULATE_FAIL
;
202 if (emulated
== EMULATE_FAIL
)
203 emulated
= kvmppc_booke_emulate_op(run
, vcpu
, inst
, advance
);
208 int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu
*vcpu
, int sprn
, ulong spr_val
)
210 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
211 int emulated
= EMULATE_DONE
;
214 #ifndef CONFIG_KVM_BOOKE_HV
216 kvmppc_set_pid(vcpu
, spr_val
);
221 vcpu_e500
->pid
[1] = spr_val
;
226 vcpu_e500
->pid
[2] = spr_val
;
229 vcpu
->arch
.shared
->mas0
= spr_val
;
232 vcpu
->arch
.shared
->mas1
= spr_val
;
235 vcpu
->arch
.shared
->mas2
= spr_val
;
238 vcpu
->arch
.shared
->mas7_3
&= ~(u64
)0xffffffff;
239 vcpu
->arch
.shared
->mas7_3
|= spr_val
;
242 vcpu
->arch
.shared
->mas4
= spr_val
;
245 vcpu
->arch
.shared
->mas6
= spr_val
;
248 vcpu
->arch
.shared
->mas7_3
&= (u64
)0xffffffff;
249 vcpu
->arch
.shared
->mas7_3
|= (u64
)spr_val
<< 32;
253 vcpu_e500
->l1csr0
= spr_val
;
254 vcpu_e500
->l1csr0
&= ~(L1CSR0_DCFI
| L1CSR0_CLFC
);
257 vcpu_e500
->l1csr1
= spr_val
;
258 vcpu_e500
->l1csr1
&= ~(L1CSR1_ICFI
| L1CSR1_ICLFR
);
261 vcpu_e500
->hid0
= spr_val
;
264 vcpu_e500
->hid1
= spr_val
;
268 emulated
= kvmppc_e500_emul_mt_mmucsr0(vcpu_e500
,
274 * Guest relies on host power management configurations
275 * Treat the request as a general store
277 vcpu
->arch
.pwrmgtcr0
= spr_val
;
282 * If we are here, it means that we have already flushed the
283 * branch predictor, so just return to guest.
287 /* extra exceptions */
288 #ifdef CONFIG_SPE_POSSIBLE
290 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_UNAVAIL
] = spr_val
;
293 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_DATA
] = spr_val
;
296 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_ROUND
] = spr_val
;
299 #ifdef CONFIG_ALTIVEC
301 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL
] = spr_val
;
304 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_ASSIST
] = spr_val
;
308 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_PERFORMANCE_MONITOR
] = spr_val
;
310 #ifdef CONFIG_KVM_BOOKE_HV
312 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL
] = spr_val
;
315 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL_CRIT
] = spr_val
;
319 emulated
= kvmppc_booke_emulate_mtspr(vcpu
, sprn
, spr_val
);
325 int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu
*vcpu
, int sprn
, ulong
*spr_val
)
327 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
328 int emulated
= EMULATE_DONE
;
331 #ifndef CONFIG_KVM_BOOKE_HV
333 *spr_val
= vcpu_e500
->pid
[0];
336 *spr_val
= vcpu_e500
->pid
[1];
339 *spr_val
= vcpu_e500
->pid
[2];
342 *spr_val
= vcpu
->arch
.shared
->mas0
;
345 *spr_val
= vcpu
->arch
.shared
->mas1
;
348 *spr_val
= vcpu
->arch
.shared
->mas2
;
351 *spr_val
= (u32
)vcpu
->arch
.shared
->mas7_3
;
354 *spr_val
= vcpu
->arch
.shared
->mas4
;
357 *spr_val
= vcpu
->arch
.shared
->mas6
;
360 *spr_val
= vcpu
->arch
.shared
->mas7_3
>> 32;
364 *spr_val
= vcpu
->arch
.decar
;
367 *spr_val
= vcpu
->arch
.tlbcfg
[0];
370 *spr_val
= vcpu
->arch
.tlbcfg
[1];
373 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
375 *spr_val
= vcpu
->arch
.tlbps
[0];
378 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
380 *spr_val
= vcpu
->arch
.tlbps
[1];
383 *spr_val
= vcpu_e500
->l1csr0
;
386 *spr_val
= vcpu_e500
->l1csr1
;
389 *spr_val
= vcpu_e500
->hid0
;
392 *spr_val
= vcpu_e500
->hid1
;
395 *spr_val
= vcpu_e500
->svr
;
403 *spr_val
= vcpu
->arch
.mmucfg
;
406 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
409 * Legacy Linux guests access EPTCFG register even if the E.PT
410 * category is disabled in the VM. Give them a chance to live.
412 *spr_val
= vcpu
->arch
.eptcfg
;
416 *spr_val
= vcpu
->arch
.pwrmgtcr0
;
419 /* extra exceptions */
420 #ifdef CONFIG_SPE_POSSIBLE
422 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_UNAVAIL
];
425 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_DATA
];
428 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_ROUND
];
431 #ifdef CONFIG_ALTIVEC
433 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL
];
436 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_ASSIST
];
440 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_PERFORMANCE_MONITOR
];
442 #ifdef CONFIG_KVM_BOOKE_HV
444 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL
];
447 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL_CRIT
];
451 emulated
= kvmppc_booke_emulate_mfspr(vcpu
, sprn
, spr_val
);