/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency. This is automatic for SoftMMU based system
 * emulation due to its single-threaded nature. In user-mode emulation
 * access to the memory-related structures is protected with the
 * mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

/* translation block context */
static __thread int have_tb_lock;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
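
/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical target with L1_MAP_ADDR_SPACE_BITS = 48, TARGET_PAGE_BITS = 12
 * and V_L2_BITS = 10, the remainder (48 - 12) % 10 = 6 is >= V_L1_MIN_BITS,
 * so v_l1_size = 64, v_l1_shift = 48 - 12 - 6 = 30 and v_l2_levels = 2.
 * A lookup then walks a 6-bit L1 index, two 10-bit intermediate levels and
 * a final 10-bit PageDesc level: 6 + 10 + 10 + 10 = 36 bits, exactly the
 * page-index width of the 48-bit address space.
 */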

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}
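
/*
 * Illustrative usage sketch (not part of the original file): writers of the
 * translation structures bracket their updates with this lock pair, e.g.
 *
 *     tb_lock();
 *     tb_phys_invalidate(tb, -1);
 *     tb_unlock();
 *
 * tb_lock_reset() is for the longjmp path back into the cpu_exec loop,
 * where the lock may or may not still be held.
 */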

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
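
/*
 * Minimal round-trip check for the two helpers above (an illustrative
 * sketch, not used by the translator): encoding a value and decoding it
 * back must yield the original value.
 */
static inline bool sleb128_roundtrip_ok(target_long val)
{
    uint8_t buf[10];    /* worst case for 64 bits: ceil(64 / 7) bytes */
    uint8_t *p = buf;

    encode_sleb128(buf, val);
    return decode_sleb128(&p) == val;
}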

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
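
/*
 * Worked example of the encoding above (illustrative): take a TB of two
 * guest insns at pc and pc + 4, with a single insn_start word, whose host
 * code ends at offsets 10 and 24 from tc.ptr.  Starting from the seed
 * { tb->pc, tb->tc.ptr }, the emitted deltas are
 *
 *     row 0: sleb128(0), sleb128(10)   guest pc unchanged, host end at +10
 *     row 1: sleb128(4), sleb128(14)   guest pc + 4, host end 10 -> 24
 *
 * so a typical row costs only a couple of bytes.
 */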

/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx->restore_time += profile_getclock() - ti;
    tcg_ctx->restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */

    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_remove(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}
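
/*
 * Typical caller pattern (an illustrative, hypothetical helper): code that
 * raises an exception from a helper passes its own return address so the
 * guest state can be rebuilt from the TB that called it:
 *
 *     void helper_foo(CPUArchState *env)
 *     {
 *         cpu_restore_state(ENV_GET_CPU(env), GETPC());
 *         ... raise the exception ...
 *     }
 */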

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
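
/*
 * Example (illustrative): buf = 0x0ff00000 with size = 32MB ends at
 * 0x11f00000, and (0x0ff00000 ^ 0x11f00000) & ~0x0ffffffful = 0x10000000
 * is non-zero, so that buffer straddles a 256MB boundary and must be split.
 */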

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx->code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have their .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}
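
/*
 * Lookup sketch (illustrative): a search key { .ptr = host_pc, .size = 0 }
 * compares equal, via ptr_cmp_tb_tc(), to the unique tree node whose
 * [ptr, ptr + size) range contains host_pc; tb_find_pc() below relies on
 * exactly this.
 */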

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    tb_ctx.tb_tree = g_tree_new(tb_tc_cmp);
    qemu_mutex_init(&tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(tb == NULL)) {
        return NULL;
    }
    return tb;
}

/* Called with tb_lock held.  */
void tb_remove(TranslationBlock *tb)
{
    assert_tb_locked();

    g_tree_remove(tb_ctx.tb_tree, &tb->tc);
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
        size_t host_size = 0;

        g_tree_foreach(tb_ctx.tb_tree, tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%td nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer, nb_tbs,
               nb_tbs > 0 ? host_size / nb_tbs : 0);
    }
    if ((unsigned long)(tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer)
        > tcg_ctx->code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    /* Increment the refcount first so that destroy acts as a reset */
    g_tree_ref(tb_ctx.tb_tree);
    g_tree_destroy(tb_ctx.tb_tree);

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx->code_gen_ptr = tcg_ctx->code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
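
/*
 * Note on the encoding used above and below (illustrative summary): the
 * page_next[] and jmp_list_* words store a TranslationBlock pointer with
 * the index n OR'ed into its two low bits (0 or 1 for the jump slot,
 * 2 for the list head), which is why entries are unpacked with "& 3"
 * and "& ~3".
 */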

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_remove(&tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx->tb_count1++; /* includes aborted translations because of
                             exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = ENV_GET_CPU(env);
    gen_intermediate_code(cpu, tb);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx->tb_count++;
    tcg_ctx->interm_time += profile_getclock() - ti;
    ti = profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    tcg_ctx->code_time += profile_getclock() - ti;
    tcg_ctx->code_in_len += tb->size;
    tcg_ctx->code_out_len += gen_code_size;
    tcg_ctx->search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx->code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    g_tree_insert(tb_ctx.tb_tree, &tb->tc, tb);
    return tb;
}
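
/*
 * Illustrative call site (hypothetical, condensed from the cpu-exec miss
 * path): a TB is generated under both locks when a lookup fails:
 *
 *     mmap_lock();
 *     tb_lock();
 *     tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *     tb_unlock();
 *     mmap_unlock();
 */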

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap. FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
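
/*
 * Worked example for the bitmap test above (illustrative): for a write of
 * len = 4 at page offset nr = 0x123, b holds the code bitmap shifted right
 * by nr, so "b & ((1 << len) - 1)" is non-zero iff any of the bytes
 * 0x123..0x126 of the page belongs to translated code.
 */
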
6fad459c 1631#else
75809229
PM
1632/* Called with mmap_lock held. If pc is not 0 then it indicates the
1633 * host PC of the faulting store instruction that caused this invalidate.
1634 * Returns true if the caller needs to abort execution of the current
1635 * TB (because it was modified by this store and the guest CPU has
1636 * precise-SMC semantics).
1637 */
1638static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
5b6dd868
BS
1639{
1640 TranslationBlock *tb;
1641 PageDesc *p;
1642 int n;
1643#ifdef TARGET_HAS_PRECISE_SMC
1644 TranslationBlock *current_tb = NULL;
4917cf44
AF
1645 CPUState *cpu = current_cpu;
1646 CPUArchState *env = NULL;
5b6dd868
BS
1647 int current_tb_modified = 0;
1648 target_ulong current_pc = 0;
1649 target_ulong current_cs_base = 0;
89fee74a 1650 uint32_t current_flags = 0;
5b6dd868
BS
1651#endif
1652
ba051fb5
AB
1653 assert_memory_lock();
1654
5b6dd868
BS
1655 addr &= TARGET_PAGE_MASK;
1656 p = page_find(addr >> TARGET_PAGE_BITS);
1657 if (!p) {
75809229 1658 return false;
5b6dd868 1659 }
a5e99826
FK
1660
1661 tb_lock();
5b6dd868
BS
1662 tb = p->first_tb;
1663#ifdef TARGET_HAS_PRECISE_SMC
1664 if (tb && pc != 0) {
1665 current_tb = tb_find_pc(pc);
1666 }
4917cf44
AF
1667 if (cpu != NULL) {
1668 env = cpu->env_ptr;
d77953b9 1669 }
5b6dd868
BS
1670#endif
1671 while (tb != NULL) {
1672 n = (uintptr_t)tb & 3;
1673 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1674#ifdef TARGET_HAS_PRECISE_SMC
1675 if (current_tb == tb &&
1676 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1677 /* If we are modifying the current TB, we must stop
1678 its execution. We could be more precise by checking
1679 that the modification is after the current PC, but it
1680 would require a specialized function to partially
1681 restore the CPU state */
1682
1683 current_tb_modified = 1;
74f10515 1684 cpu_restore_state_from_tb(cpu, current_tb, pc);
5b6dd868
BS
1685 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1686 &current_flags);
1687 }
1688#endif /* TARGET_HAS_PRECISE_SMC */
1689 tb_phys_invalidate(tb, addr);
1690 tb = tb->page_next[n];
1691 }
1692 p->first_tb = NULL;
1693#ifdef TARGET_HAS_PRECISE_SMC
1694 if (current_tb_modified) {
9b990ee5
RH
1695 /* Force execution of one insn next time. */
1696 cpu->cflags_next_tb = 1 | curr_cflags();
a5e99826
FK
1697 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1698 * back into the cpu_exec loop. */
75809229 1699 return true;
5b6dd868
BS
1700 }
1701#endif
a5e99826
FK
1702 tb_unlock();
1703
75809229 1704 return false;
5b6dd868
BS
1705}
1706#endif
1707
2ac01d6d
EC
1708/*
1709 * Find the TB 'tb' such that
1710 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
1711 * Return NULL if not found.
1712 */
a8a826a3 1713static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
5b6dd868 1714{
2ac01d6d 1715 struct tb_tc s = { .ptr = (void *)tc_ptr };
5b6dd868 1716
44ded3d0 1717 return g_tree_lookup(tb_ctx.tb_tree, &s);
5b6dd868
BS
1718}
1719
ec53b45b 1720#if !defined(CONFIG_USER_ONLY)
29d8ec7b 1721void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
5b6dd868
BS
1722{
1723 ram_addr_t ram_addr;
5c8a00ce 1724 MemoryRegion *mr;
149f54b5 1725 hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* Called with tb_lock held. */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with the iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
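    /* icount_decr.u16.low counts down as guest instructions retire, so
     * adding tb->icount back reconstructs its value on entry to this TB. */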
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    /* Adjust the execution state of the next TB. */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
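    /* The low CF_COUNT_MASK bits request a TB of exactly n guest insns;
     * CF_LAST_IO marks that the final insn may be an I/O access, so it
     * is translated so that it can perform the access directly. */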

    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_remove(tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     *
     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
     * tb_lock gets reset.
     */
    cpu_loop_exit_noexc(cpu);
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
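    /* tb_jmp_cache_hash_page() yields the first of TB_JMP_PAGE_SIZE
     * consecutive jump-cache slots reserved for addresses in this page. */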

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
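    /* A TB can start on the preceding page and extend into the flushed
     * one, so that page's cache region must be discarded as well. */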
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

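    /* GTraverseFunc semantics: returning FALSE keeps g_tree_foreach()
     * iterating over the remaining nodes. */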
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs;

    tb_lock();

    nb_tbs = g_tree_nnodes(tb_ctx.tb_tree);
    g_tree_foreach(tb_ctx.tb_tree, tb_tree_stats_iter, &tst);
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx->code_gen_ptr - tcg_ctx->code_gen_buffer,
                tcg_ctx->code_gen_highwater - tcg_ctx->code_gen_buffer);
    cpu_fprintf(f, "TB count %zu\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    cpu_fprintf(f, "TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    cpu_fprintf(f, "TB invalidate count %d\n", tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
    tcg_dump_info(f, cpu_fprintf);

    tb_unlock();
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
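    /* Make the 32-bit view of icount_decr negative so that the next
     * TB-exit check in generated code notices the pending interrupt. */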
    cpu->icount_decr.u16.high = -1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

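    /* Recurse down the radix tree behind page_find(): level 0 tables
     * hold PageDesc leaves; higher levels hold V_L2_SIZE child pointers. */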
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* Called from a signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

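        /* mprotect() works at host-page granularity, which may cover
         * several target pages; each of them must get PAGE_WRITE back
         * and have its translations dropped. */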
        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
            if (DEBUG_TB_CHECK_GATE) {
                tb_invalidate_check(addr);
            }
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If the current TB was invalidated, return to the main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}