/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in low physical memory          *
 *                                                                           *
 ****************************************************************************/
#if defined(CONFIG_PPC_BOOK3S_64)

/* On 64 bit the shadow vcpu lives inside the PACA */
#define LOAD_SHADOW_VCPU(reg)	GET_PACA(reg)
#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
#define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name)		GLUE(.,name)

#elif defined(CONFIG_PPC_BOOK3S_32)

/*
 * On 32 bit the shadow vcpu hangs off the thread struct. NOTE: this
 * macro is only expanded inside the INTERRUPT_TRAMPOLINE assembler
 * macro below, which is where \intno gets its meaning.
 */
#define LOAD_SHADOW_VCPU(reg)						\
	mfspr	reg, SPRN_SPRG_THREAD;					\
	lwz	reg, THREAD_KVM_SVCPU(reg);				\
	/* PPC32 can have a NULL pointer - let's check for that */	\
	mtspr	SPRN_SPRG_SCRATCH1, r12;	/* Save r12 */		\
	mfcr	r12;							\
	cmpwi	reg, 0;							\
	bne	1f;							\
	mfspr	reg, SPRN_SPRG_SCRATCH0;				\
	mtcr	r12;							\
	mfspr	r12, SPRN_SPRG_SCRATCH1;				\
	b	kvmppc_resume_\intno;					\
1:;									\
	mtcr	r12;							\
	mfspr	r12, SPRN_SPRG_SCRATCH1;				\
	tophys(reg, reg)

#define SHADOW_VCPU_OFF		0
#define MSR_NOIRQ		MSR_KERNEL
#define FUNC(name)		name

#endif
/*
 * Entry trampoline stamped out once per interrupt vector. It decides
 * whether the exception came from a KVM guest or from bare Linux and
 * dispatches accordingly. Clobbers r12/r13 (both saved first).
 */
.macro INTERRUPT_TRAMPOLINE intno

.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:

	mtspr	SPRN_SPRG_SCRATCH0, r13		/* Save r13 */

	/*
	 * First thing to do is to find out if we're coming
	 * from a KVM guest or a Linux process.
	 *
	 * To distinguish, we check a magic byte in the PACA/current
	 */
	LOAD_SHADOW_VCPU(r13)
	PPC_STL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	mfcr	r12
	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
	lbz	r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
	cmpwi	r12, KVM_GUEST_MODE_NONE
	bne	..kvmppc_handler_hasmagic_\intno
	/* No KVM guest? Then jump back to the Linux handler! */
	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
	mtcr	r12
	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
	b	kvmppc_resume_\intno		/* Get back original handler */

	/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:

	/* Should we just skip the faulting instruction? */
	cmpwi	r12, KVM_GUEST_MODE_SKIP
	beq	kvmppc_handler_skip_ins

	/* Let's store which interrupt we're handling */
	li	r12, \intno

	/* Jump into the SLB exit code that goes to the highmem handler */
	b	kvmppc_handler_trampoline_exit

.endm
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSTEM_RESET
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL_HV
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_FP_UNAVAIL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DECREMENTER
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSCALL
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC

/* Those are only available on 64 bit machines */

#ifdef CONFIG_PPC_BOOK3S_64
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_SEGMENT
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_SEGMENT
INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
#endif
/*
 * Bring us back to the faulting code, but skip the
 * faulting instruction.
 *
 * This is a generic exit path from the interrupt
 * trampolines above.
 *
 * Input Registers:
 *
 * R12            = free
 * R13            = Shadow VCPU (PACA)
 * SVCPU.SCRATCH0 = guest R12
 * SVCPU.SCRATCH1 = guest CR
 * SPRG_SCRATCH0  = guest R13
 *
 */
kvmppc_handler_skip_ins:

	/* Patch the IP to the next instruction */
	mfsrr0	r12
	addi	r12, r12, 4
	mtsrr0	r12

	/* Clean up all state */
	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
	mtcr	r12
	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
	mfspr	r13, SPRN_SPRG_SCRATCH0

	/* And get back into the code */
	RFI
/*
 * This trampoline brings us back to a real mode handler
 *
 * Input Registers:
 *
 * R5 = SRR0
 * R6 = SRR1
 * LR = real-mode IP
 *
 */
.global kvmppc_handler_lowmem_trampoline
kvmppc_handler_lowmem_trampoline:

	mtsrr0	r5
	mtsrr1	r6
	blr
kvmppc_handler_lowmem_trampoline_end:
/*
 * Call a function in real mode
 *
 * Input Registers:
 *
 * R3 = function
 * R4 = MSR
 * R5 = scratch register
 *
 */
_GLOBAL(kvmppc_rmcall)
	LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
	mtmsr	r5		/* Disable relocation and interrupts, so mtsrr
				   doesn't get interrupted */
	sync
	mtsrr0	r3
	mtsrr1	r4
	RFI
#if defined(CONFIG_PPC_BOOK3S_32)
#define STACK_LR	INT_FRAME_SIZE+4

/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
#define MSR_EXT_START						\
	PPC_STL	r20, _NIP(r1);					\
	mfmsr	r20;						\
	LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);			\
	andc	r3,r20,r3;		/* Disable DR,EE */	\
	mtmsr	r3;						\
	sync

#define MSR_EXT_END						\
	mtmsr	r20;			/* Enable DR,EE */	\
	sync;							\
	PPC_LL	r20, _NIP(r1)

#elif defined(CONFIG_PPC_BOOK3S_64)
#define STACK_LR	_LINK
#define MSR_EXT_START
#define MSR_EXT_END
#endif
/*
 * Activate current's external feature (FPU/Altivec/VSX)
 *
 * Saves LR in the stack frame (at STACK_LR, which differs per subarch),
 * drops DR/EE on Book3S_32 around the call (see MSR_EXT_START/END above),
 * and calls the kernel's load_up_<what> routine.
 */
#define define_load_up(what)					\
								\
_GLOBAL(kvmppc_load_up_ ## what);				\
	PPC_STLU r1, -INT_FRAME_SIZE(r1);			\
	mflr	r3;						\
	PPC_STL	r3, STACK_LR(r1);				\
	MSR_EXT_START;						\
								\
	bl	FUNC(load_up_ ## what);				\
								\
	MSR_EXT_END;						\
	PPC_LL	r3, STACK_LR(r1);				\
	mtlr	r3;						\
	addi	r1, r1, INT_FRAME_SIZE;				\
	blr

define_load_up(fpu)
#ifdef CONFIG_ALTIVEC
define_load_up(altivec)
#endif
#ifdef CONFIG_VSX
define_load_up(vsx)
#endif
/*
 * Physical (kernel-start-relative) addresses of the trampolines above,
 * for C code to pick up.
 */
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
	PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START

.global kvmppc_trampoline_enter
kvmppc_trampoline_enter:
	PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START

#include "book3s_segment.S"