]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/arm64/kvm/emulate.c
arm64: KVM: Move kvm_vcpu_get_condition out of emulate.c
[mirror_ubuntu-zesty-kernel.git] / arch / arm64 / kvm / emulate.c
CommitLineData
27b190bd
MZ
1/*
2 * (not much of an) Emulation layer for 32bit guests.
3 *
4 * Copyright (C) 2012,2013 - ARM Ltd
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * based on arch/arm/kvm/emulate.c
8 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
10 *
11 * This program is free software: you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <linux/kvm_host.h>
25#include <asm/kvm_emulate.h>
26
/*
 * Condition-code lookup table, borrowed from arch/arm/kernel/opcodes.c.
 *
 * The table is indexed by the instruction's 4-bit condition field
 * (EQ, NE, ... LT, GT, AL, NV).  Bit <f> of the selected 16-bit entry
 * tells whether the condition passes when the NZCV flags, read as a
 * 4-bit value, equal <f>.
 */
static const unsigned short cc_map[16] = {
	0xF0F0,		/* EQ: Z set			*/
	0x0F0F,		/* NE: Z clear			*/
	0xCCCC,		/* CS: C set			*/
	0x3333,		/* CC: C clear			*/
	0xFF00,		/* MI: N set			*/
	0x00FF,		/* PL: N clear			*/
	0xAAAA,		/* VS: V set			*/
	0x5555,		/* VC: V clear			*/
	0x0C0C,		/* HI: C set && Z clear		*/
	0xF3F3,		/* LS: C clear || Z set		*/
	0xAA55,		/* GE: N == V			*/
	0x55AA,		/* LT: N != V			*/
	0x0A05,		/* GT: !Z && (N == V)		*/
	0xF5FA,		/* LE: Z || (N != V)		*/
	0xFFFF,		/* AL: always			*/
	0		/* NV: never			*/
};
53
27b190bd
MZ
54/*
55 * Check if a trapped instruction should have been executed or not.
56 */
57bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
58{
59 unsigned long cpsr;
60 u32 cpsr_cond;
61 int cond;
62
63 /* Top two bits non-zero? Unconditional. */
64 if (kvm_vcpu_get_hsr(vcpu) >> 30)
65 return true;
66
67 /* Is condition field valid? */
68 cond = kvm_vcpu_get_condition(vcpu);
69 if (cond == 0xE)
70 return true;
71
72 cpsr = *vcpu_cpsr(vcpu);
73
74 if (cond < 0) {
75 /* This can happen in Thumb mode: examine IT state. */
76 unsigned long it;
77
78 it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
79
80 /* it == 0 => unconditional. */
81 if (it == 0)
82 return true;
83
84 /* The cond for this insn works out as the top 4 bits. */
85 cond = (it >> 4);
86 }
87
88 cpsr_cond = cpsr >> 28;
89
90 if (!((cc_map[cond] >> cpsr_cond) & 1))
91 return false;
92
93 return true;
94}
95
/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu: The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);

	/* IT bits are only meaningful in Thumb mode; ARM mode must have none. */
	BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));

	/* Not inside an IT block: nothing to advance. */
	if (!(cpsr & COMPAT_PSR_IT_MASK))
		return;

	/*
	 * Reassemble IT[7:0] from its split CPSR encoding:
	 * cond = IT[7:5] (base condition, CPSR[15:13]),
	 * itbits = IT[4:0] (mask, CPSR[12:10] and CPSR[26:25]).
	 */
	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		/* Last instruction of the block: clear the IT state entirely. */
		itbits = cond = 0;
	else
		/* Shift the mask left by one for the next instruction. */
		itbits = (itbits << 1) & 0x1f;

	/* Scatter the updated IT[7:0] back into its CPSR fields. */
	cpsr &= ~COMPAT_PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
133
134/**
135 * kvm_skip_instr - skip a trapped instruction and proceed to the next
136 * @vcpu: The vcpu pointer
137 */
138void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
139{
140 bool is_thumb;
141
142 is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
143 if (is_thumb && !is_wide_instr)
144 *vcpu_pc(vcpu) += 2;
145 else
146 *vcpu_pc(vcpu) += 4;
147 kvm_adjust_itstate(vcpu);
148}