/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

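/*
 * Helpers that check whether the guest currently has the relevant
 * facility enabled in its MSR.  If it does not, the matching "facility
 * unavailable" interrupt is queued for the guest and the caller bails
 * out without advancing the PC, so the guest can enable the facility
 * and retry the instruction.
 */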
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * the remaining vector element loads and stores (lvebx etc.);
 * lfiwax/lfiwzx and lvx/stvx are handled below
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* This default type might be overwritten by subcategories. */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * If mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory.
	 * If mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory.
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;

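	/*
	 * The mmio_* state just reset above persists in vcpu->arch across
	 * the exit to userspace for the MMIO access and is consumed when
	 * the access completes (see kvmppc_complete_mmio_load()), so it
	 * must not carry over from a previously emulated instruction.
	 */
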
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

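		/*
		 * The "update" forms (lwzux, stbux, ...) below write the
		 * effective address of the access back into ra, matching
		 * the architected update-form semantics.
		 */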
		case OP_31_XOP_LWZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STDX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STDUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STWUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/*
			 * Do nothing.  The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled
			 * dcache coherence.
			 */
			break;

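		/*
		 * The byte-reversed forms pass 0 as the final
		 * (is_default_endian) argument, so the access is
		 * byte-swapped relative to the guest's normal byte order.
		 */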
		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		case OP_31_XOP_LDBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
			break;

		case OP_31_XOP_STDBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 0);
			break;

		case OP_31_XOP_LDX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;

		case OP_31_XOP_LDUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LWAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LWAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

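		/*
		 * The FP loads below target KVM_MMIO_REG_FPR|rt so the MMIO
		 * completion path writes the result into the FPR rather than
		 * a GPR.  The single-precision variants set mmio_sp64_extend
		 * so the 4-byte memory value is converted to/from the 64-bit
		 * register format.
		 */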
#ifdef CONFIG_PPC_FPU
		case OP_31_XOP_LFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			break;

		case OP_31_XOP_LFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFIWAX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFIWZX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_STFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFIWX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;
#endif

#ifdef CONFIG_VSX
		case OP_31_XOP_LXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXSIWAX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 1);
			break;

		case OP_31_XOP_LXSIWZX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVD2X:
			/*
			 * The emulated load/store process works like this:
			 *
			 * Step 1: exit from the VM via the page-fault ISR,
			 * at which point KVM saves the VSRs (see
			 * guest_exit_cont->store_fp_state->SAVE_32VSRS
			 * for reference).
			 *
			 * Step 2: copy the data between memory and the vcpu.
			 * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
			 * 2 copies of 8 bytes or 4 copies of 4 bytes to
			 * emulate one 16-byte copy.
			 * There is also an endianness issue here: mind the
			 * layout of memory.  See the LXVD2X_ROT/STXVD2X_ROT
			 * macros for reference: if the host is little-endian,
			 * KVM uses XXSWAPD for LXVD2X_ROT/STXVD2X_ROT, so the
			 * two doublewords in memory must be swapped.
			 *
			 * Step 3: return to the guest, at which point KVM
			 * restores the registers (see
			 * kvmppc_hv_entry->load_fp_state->REST_32VSRS
			 * for reference).
			 */
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVDSX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type =
					KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_STXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXSIWX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_offset = 1;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXVD2X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
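		/*
		 * lvx/stvx ignore the low four bits of the EA (the access
		 * is always 16-byte aligned), so mask both the virtual and
		 * real addresses, and emulate the 128-bit access as two
		 * 64-bit MMIO operations (mmio_vmx_copy_nums = 2).
		 */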
		case OP_31_XOP_LVX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.vaddr_accessed &= ~0xFULL;
			vcpu->arch.paddr_accessed &= ~0xFULL;
			vcpu->arch.mmio_vmx_copy_nums = 2;
			emulated = kvmppc_handle_load128_by2x64(run, vcpu,
					KVM_MMIO_REG_VMX|rt, 1);
			break;

		case OP_31_XOP_STVX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.vaddr_accessed &= ~0xFULL;
			vcpu->arch.paddr_accessed &= ~0xFULL;
			vcpu->arch.mmio_vmx_copy_nums = 2;
			emulated = kvmppc_handle_store128_by2x64(run, vcpu,
					rs, 1);
			break;
#endif /* CONFIG_ALTIVEC */

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

#ifdef CONFIG_PPC_FPU
	case OP_STFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
		break;

	case OP_STFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
		break;

	case OP_STFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

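	/*
	 * ld/ldu/lwa and std/stdu are DS-form: the two low-order bits of
	 * the instruction word select the variant, hence the switch on
	 * (inst & 3) in the OP_LD and OP_STD cases below.
	 */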
	case OP_LD:
		rt = get_rt(inst);
		switch (inst & 3) {
		case 0: /* ld */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;
		case 1: /* ldu */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		case 2: /* lwa */
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	case OP_STD:
		rs = get_rs(inst);
		switch (inst & 3) {
		case 0: /* std */
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;
		case 1: /* stdu */
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

#ifdef CONFIG_PPC_FPU
	case OP_LFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		break;

	case OP_LFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		break;

	case OP_LFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	default:
		emulated = EMULATE_FAIL;
		break;
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/*
	 * Advance past the emulated instruction; all PowerPC
	 * instructions are 4 bytes long.
	 */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}