/*
 * User emulator execution
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
/*
 * Adjust the pc to pass to cpu_restore_state; return the MMU access type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame. However, we cannot
         * use that value directly. Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn. However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated. If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here). Therefore, do not
         * trigger the unwinder.
         *
         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
         */
        mmap_unlock();
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
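
/*
 * Illustrative sketch (an assumed caller, not code from this file):
 * a host SIGSEGV handler would use adjust_signal_pc() along these
 * lines, with host_pc and is_write extracted from the signal frame:
 *
 *     MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);
 *     if (access_type == MMU_INST_FETCH) {
 *         cpu_loop_exit(cpu);            // pc already correct; no unwind
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, host_pc);
 */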

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and the access
 * should be retried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to non-writable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable, this means two threads raced and another
 * thread got there first and already made the page writable, so we
 * will retry the access. If we were to call page_unprotect() for
 * some other kind of fault that should really be passed to the guest,
 * we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations; it must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
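
/*
 * Illustrative sketch (an assumed caller, not code from this file):
 * the per-host SIGSEGV handler retries the faulting access when this
 * returns true, roughly:
 *
 *     if (is_write && info->si_code == SEGV_ACCERR &&
 *         handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
 *                                     host_pc, guest_addr)) {
 *         return;   // fault handled; re-execute the faulting insn
 *     }
 */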

static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, target_ulong addr,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
    *phost = flags ? NULL : g2h(env_cpu(env), addr);
    return flags;
}
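
/*
 * Illustrative sketch (a hypothetical caller): nonfault probing lets a
 * helper test accessibility without raising SIGSEGV, assuming addr and
 * ra are in scope:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD,
 *                                    MMU_USER_IDX, true, &host, ra);
 *     if (flags & TLB_INVALID_MASK) {
 *         // page unmapped or unreadable; host is NULL
 *     }
 */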

void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert(flags == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
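
/*
 * Illustrative sketch (a hypothetical caller, with len an assumed
 * size that does not cross a page boundary, per the g_assert above):
 * a target helper can validate a whole destination up front:
 *
 *     void *host = probe_access(env, addr, len, MMU_DATA_STORE,
 *                               MMU_USER_IDX, GETPC());
 *     memset(host, 0, len);   // now guaranteed not to fault
 */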

/* The softmmu versions of these helpers are in cputlb.c. */

/*
 * Verify that we have passed the correct MemOp to the correct function.
 *
 * We could present one function to target code, and dispatch based on
 * the MemOp, but so far we have worked hard to avoid an indirect function
 * call along the memory path.
 */
static void validate_memop(MemOpIdx oi, MemOp expected)
{
#ifdef CONFIG_DEBUG_TCG
    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
    assert(have == expected);
#endif
}
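
/*
 * Illustrative sketch: a MemOpIdx packs a MemOp together with an mmu
 * index, so a caller building one for a little-endian 32-bit access
 * would write, for example:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
 *
 * validate_memop(oi, MO_LEUL) then passes, since the size and
 * byte-swap bits match.
 */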

void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
}

void helper_unaligned_st(CPUArchState *env, target_ulong addr)
{
    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
}

static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
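
/*
 * Worked example: for MO_LEUL | MO_ALIGN, get_alignment_bits() yields
 * a_bits == 2, so the mask (1 << 2) - 1 == 3 traps any address with a
 * low bit set, e.g. addr == 0x1002 raises SIGBUS while addr == 0x1004
 * passes.
 */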

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    validate_memop(oi, MO_UB);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_BEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_BEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_BEUQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_be_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    validate_memop(oi, MO_LEUW);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = lduw_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    validate_memop(oi, MO_LEUL);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldl_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    validate_memop(oi, MO_LEUQ);
    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_le_p(haddr);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
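
/*
 * Illustrative sketch (a hypothetical caller): a target helper doing
 * an aligned big-endian 32-bit guest load combines the pieces above as:
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, MMU_USER_IDX);
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 */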

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_UB);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_BEUQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_be_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUW);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stw_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUL);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stl_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    validate_memop(oi, MO_LEUQ);
    trace_guest_st_before_exec(env_cpu(env), addr, oi);
    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
    stq_le_p(haddr, val);
    clear_helper_retaddr();
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
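
/*
 * Illustrative sketch (a hypothetical caller): the matching store
 * path, e.g. an unaligned-tolerant little-endian 64-bit store:
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUQ, MMU_USER_IDX);
 *     cpu_stq_le_mmu(env, addr, val, oi, GETPC());
 */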

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
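
/*
 * Illustrative sketch: a guest translator fetches instruction words
 * through these helpers, e.g.
 *
 *     uint32_t insn = cpu_ldl_code(env, pc);
 *
 * The set_helper_retaddr(1) above marks the access as an instruction
 * fetch, so adjust_signal_pc() takes its "case 1" path should the
 * host read fault.
 */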

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed. Return the host address.
 *
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               MemOpIdx oi, int size, int prot,
                               uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
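
/*
 * Worked example: even if the guest architecture permits an unaligned
 * 4-byte atomic, qemu's host atomics require natural alignment, so for
 * size == 4 an address like 0x1002 takes the cpu_loop_exit_atomic()
 * path and the operation is restarted under exclusive execution.
 */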

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX
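
/*
 * Illustrative expansion: for DATA_SIZE 4 the template defines SUFFIX
 * as 'l' and END as '_le' or '_be', so ATOMIC_NAME(cmpxchg) yields
 * e.g. cpu_atomic_cmpxchgl_le_mmu, which a target helper would call as:
 *
 *     uint32_t old = cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv,
 *                                               oi, GETPC());
 */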

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif