/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "exec/translator.h"
#include "exec/tb-flush.h"
#include "qemu/bitmap.h"
#include "qemu/qemu-print.h"
#include "qemu/main-loop.h"
#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"
#include "tcg/perf.h"
#include "tcg/insn-start-words.h"

TBContext tb_ctx;

/*
 * Encode VAL as a signed leb128 sequence at P.
 * Return P incremented past the encoded value.
 */
static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/*
 * Decode a signed leb128 sequence at *PP; increment *PP past the
 * decoded value.  Return the decoded value.
 */
static int64_t decode_sleb128(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    int64_t val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (int64_t)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(int64_t)1 << shift;
    }

    *pp = p;
    return val;
}
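
/*
 * Worked example (illustrative): the helpers above emit 7 bits per byte,
 * least-significant group first, with bit 7 as a continuation flag and
 * bit 6 of the final byte as the sign.  So -100 encodes as { 0x9c, 0x7f }:
 *
 *     uint8_t buf[10];
 *     const uint8_t *p = buf;
 *     uint8_t *end = encode_sleb128(buf, -100);
 *     // buf[0] == 0x9c, buf[1] == 0x7f, end == buf + 2
 *     int64_t v = decode_sleb128(&p);
 *     // v == -100 (sign-extended from bit 6 of the final byte), p == end
 */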

/*
 * Encode the data collected about the instructions while compiling TB.
 * Place the data at BLOCK, and return the number of bytes consumed.
 *
 * The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
 * which come from the target's insn_start data, followed by a uintptr_t
 * which comes from the host pc of the end of the code implementing the insn.
 *
 * Each line of the table is encoded as sleb128 deltas from the previous
 * line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
 * That is, the first column is seeded with the guest pc, the last column
 * with the host pc, and the middle columns with zeros.
 */
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint64_t *insn_data = tcg_ctx->gen_insn_data;
    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        uint64_t prev, curr;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
            } else {
                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
            }
            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
            p = encode_sleb128(p, curr - prev);
        }
        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
        curr = insn_end_off[i];
        p = encode_sleb128(p, curr - prev);

        /*
         * Test for (pending) buffer overflow.  The assumption is that any
         * one row beginning below the high water mark cannot overrun
         * the buffer completely.  Thus we can test for overflow after
         * encoding a row without having to check during encoding.
         */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
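
/*
 * Worked example (illustrative, with assumed values): for a non-CF_PCREL
 * TB at guest pc 0x1000 holding two 4-byte insns whose generated code
 * ends at host offsets 0x30 and 0x48, with TARGET_INSN_START_WORDS == 1,
 * the logical rows are { 0x1000, 0x30 } and { 0x1004, 0x48 }, and the
 * deltas actually emitted are
 *
 *     sleb128(0x1000 - 0x1000), sleb128(0x30 - 0x00)    row 0
 *     sleb128(0x1004 - 0x1000), sleb128(0x48 - 0x30)    row 1
 *
 * i.e. the four bytes { 0x00, 0x30, 0x04, 0x18 } for the whole table.
 */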

static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
                                   uint64_t *data)
{
    uintptr_t iter_pc = (uintptr_t)tb->tc.ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;

    host_pc -= GETPC_ADJ;

    if (host_pc < iter_pc) {
        return -1;
    }

    memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
    if (!(tb_cflags(tb) & CF_PCREL)) {
        data[0] = tb->pc;
    }

    /*
     * Reconstruct the stored insn data while looking for the point
     * at which the end of the insn exceeds host_pc.
     */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        iter_pc += decode_sleb128(&p);
        if (iter_pc > host_pc) {
            return num_insns - i;
        }
    }
    return -1;
}
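
/*
 * Return-value note (illustrative): a non-negative result is the number
 * of insns in the TB that had not yet completed at host_pc, including
 * the faulting insn itself; cpu_restore_state_from_tb() below credits
 * that count back to the icount budget.  -1 means host_pc does not
 * correspond to any insn in this TB.
 */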

/*
 * The cpu state corresponding to 'host_pc' is restored in
 * preparation for exiting the TB.
 */
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc)
{
    uint64_t data[TARGET_INSN_START_WORDS];
    int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

    if (insns_left < 0) {
        return;
    }

    if (tb_cflags(tb) & CF_USE_ICOUNT) {
        assert(icount_enabled());
        /*
         * Reset the cycle counter to the start of the block and
         * shift it to the number of actually executed instructions.
         */
        cpu->neg.icount_decr.u16.low += insns_left;
    }

    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     */
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc);
            return true;
        }
    }
    return false;
}
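
/*
 * Usage sketch (illustrative; the helper and its fault condition are
 * hypothetical): a target helper passes GETPC(), the return address
 * into generated code, as the host_pc argument above.
 *
 *     void helper_example(CPUArchState *env, uint64_t addr)
 *     {
 *         if (addr & 3) {
 *             CPUState *cs = env_cpu(env);
 *             cpu_restore_state(cs, GETPC());
 *             cpu_loop_exit(cs);
 *         }
 *     }
 */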

bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
{
    if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
        TranslationBlock *tb = tcg_tb_lookup(host_pc);
        if (tb) {
            return cpu_unwind_data_from_tb(tb, host_pc, data) >= 0;
        }
    }
    return false;
}

void page_init(void)
{
    page_size_init();
    page_table_config_init();
}

/*
 * Isolate the portion of code gen which can setjmp/longjmp.
 * Return the size of the generated code, or negative on error.
 */
static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
                           vaddr pc, void *host_pc,
                           int *max_insns, int64_t *ti)
{
    int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
    if (unlikely(ret != 0)) {
        return ret;
    }

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
    assert(tb->size != 0);
    tcg_ctx->cpu = NULL;
    *max_insns = tb->icount;

    return tcg_gen_code(tcg_ctx, tb, pc);
}
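
/*
 * Note (illustrative): a nonzero sigsetjmp() return here means that
 * translation bailed out with a siglongjmp() on tcg_ctx->jmp_trans.
 * Negative sizes, whether from that longjmp or returned by
 * tcg_gen_code() itself, are decoded by the switch in tb_gen_code()
 * below (-1 buffer overflow, -2 TB too large, -3 page-lock ordering).
 */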

/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              vaddr pc, uint64_t cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_p2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
    int64_t ti;
    void *host_pc;

    assert_memory_lock();
    qemu_thread_jit_write();

    phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);

    if (phys_pc == -1) {
        /* Generate a one-shot TB with 1 insn in it */
        cflags = (cflags & ~CF_COUNT_MASK) | 1;
    }

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = TCG_MAX_INSNS;
    }
    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);

 buffer_overflow:
    assert_no_pages_locked();
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
    if (!(cflags & CF_PCREL)) {
        tb->pc = pc;
    }
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb_set_page_addr0(tb, phys_pc);
    tb_set_page_addr1(tb, -1);
    if (phys_pc != -1) {
        tb_lock_page0(phys_pc);
    }

    tcg_ctx->gen_tb = tb;
    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
#ifdef CONFIG_SOFTMMU
    tcg_ctx->page_bits = TARGET_PAGE_BITS;
    tcg_ctx->page_mask = TARGET_PAGE_MASK;
    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
#endif
    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
#ifdef TCG_GUEST_DEFAULT_MO
    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
#else
    tcg_ctx->guest_mo = TCG_MO_ALL;
#endif

 restart_translate:
    trace_translate_block(tb, pc, tb->tc.ptr);

    gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation for "
                          "code_gen_buffer overflow\n");
            tb_unlock_pages(tb);
            tcg_ctx->gen_tb = NULL;
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            assert(max_insns > 1);
            max_insns /= 2;
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with "
                          "smaller translation block (max %d insns)\n",
                          max_insns);

            /*
             * The half-sized TB may not cross pages.
             * TODO: Fix all targets that cross pages except with
             * the first insn, at which point this can't be reached.
             */
            phys_p2 = tb_page_addr1(tb);
            if (unlikely(phys_p2 != -1)) {
                tb_unlock_page1(phys_pc, phys_p2);
                tb_set_page_addr1(tb, -1);
            }
            goto restart_translate;

        case -3:
            /*
             * We had a page lock ordering problem.  In order to avoid
             * deadlock we had to drop the lock on page0, which means
             * that everything we translated so far is compromised.
             * Restart with locks held on both pages.
             */
            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                          "Restarting code generation with re-locked pages\n");
            goto restart_translate;

        default:
            g_assert_not_reached();
        }
    }
    tcg_ctx->gen_tb = NULL;

    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        tb_unlock_pages(tb);
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

    /*
     * For CF_PCREL, attribute all executions of the generated code
     * to its first mapping.
     */
    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(pc)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            int code_size, data_size;
            const tcg_target_ulong *rx_data_gen_ptr;
            size_t chunk_start;
            int insn = 0;

            if (tcg_ctx->data_gen_ptr) {
                rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
                code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
                data_size = gen_code_size - code_size;
            } else {
                rx_data_gen_ptr = 0;
                code_size = gen_code_size;
                data_size = 0;
            }

            /* Dump header and the first instruction */
            fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
            fprintf(logfile,
                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
            chunk_start = tcg_ctx->gen_insn_end_off[insn];
            disas(logfile, tb->tc.ptr, chunk_start);

            /*
             * Dump each instruction chunk, wrapping up empty chunks into
             * the next instruction.  The whole array is offset so the
             * first entry is the beginning of the 2nd instruction.
             */
            while (insn < tb->icount) {
                size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                if (chunk_end > chunk_start) {
                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                    disas(logfile, tb->tc.ptr + chunk_start,
                          chunk_end - chunk_start);
                    chunk_start = chunk_end;
                }
                insn++;
            }

            if (chunk_start < code_size) {
                fprintf(logfile, "  -- tb slow paths + alignment\n");
                disas(logfile, tb->tc.ptr + chunk_start,
                      code_size - chunk_start);
            }

            /* Finally dump any data we may have after the block */
            if (data_size) {
                int i;
                fprintf(logfile, "  data: [size=%d]\n", data_size);
                for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                    if (sizeof(tcg_target_ulong) == 8) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .quad  0x%016" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else if (sizeof(tcg_target_ulong) == 4) {
                        fprintf(logfile,
                                "0x%08" PRIxPTR ":  .long  0x%08" TCG_PRIlx "\n",
                                (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                    } else {
                        qemu_build_not_reached();
                    }
                }
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then it must be
     * a temporary one-insn TB, and we have nothing left to do.  Return early
     * before attempting to link to other TBs or add to the lookup table.
     */
    if (tb_page_addr0(tb) == -1) {
        assert_no_pages_locked();
        return tb;
    }

    /*
     * Insert TB into the corresponding region tree before publishing it
     * through QHT.  Otherwise a rewind happening in the TB might fail to
     * look itself up using the host PC.
     */
    tcg_tb_insert(tb);

    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb);
    assert_no_pages_locked();

    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tcg_tb_remove(tb);
        return existing_tb;
    }
    return tb;
}
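
/*
 * Usage sketch (illustrative): the execution loop in accel/tcg/cpu-exec.c
 * calls tb_gen_code() when a TB lookup misses, roughly:
 *
 *     mmap_lock();    // user-mode: tb_gen_code() requires mmap_lock
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     mmap_unlock();
 *
 * The exact surrounding logic is elided here.
 */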

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        tb_phys_invalidate(tb, -1);
    } else {
        /*
         * The exception probably happened in a helper.  The CPU state
         * should have been saved before calling it.  Fetch the PC from
         * there.
         */
        CPUArchState *env = cpu_env(cpu);
        vaddr pc;
        uint64_t cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction.  When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns.  We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        vaddr pc = log_pc(cpu, tb);
        if (qemu_log_in_addr_range(pc)) {
            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
                     VADDR_PRIx "\n", pc);
        }
    }

    cpu_loop_exit_noexc(cpu);
}
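
/*
 * Note (illustrative): n above lands in the CF_COUNT_MASK bits of
 * cflags_next_tb, so the retranslated TB is capped at one insn -- or
 * two when a delay-slot branch must be replayed -- because tb_gen_code()
 * starts from max_insns = cflags & CF_COUNT_MASK.  For example:
 *
 *     cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | 2;
 *     // the next tb_gen_code() then translates at most 2 insns
 */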

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(bql_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}

#endif /* CONFIG_USER_ONLY */

/*
 * Called by generic code at e.g. cpu reset after cpu creation,
 * therefore we must be prepared to allocate the jump cache.
 */
void tcg_flush_jmp_cache(CPUState *cpu)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;

    /* During early initialization, the cache may not yet be allocated. */
    if (unlikely(jc == NULL)) {
        return;
    }

    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        qatomic_set(&jc->array[i].tb, NULL);
    }
}