/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of write accesses to a given page; past a threshold, a bitmap
       of the translated code is built */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

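/* Worked example (illustrative values, not fixed by this file): with
   TARGET_PAGE_BITS = 12, L2_BITS = 10 and L1_MAP_ADDR_SPACE_BITS = 32,
   20 bits remain above the page offset, so V_L1_BITS_REM = 0 and
   V_L1_BITS = L2_BITS: l1_map has 1024 entries, each pointing directly
   at a 1024-entry array of PageDesc (no intermediate levels). */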
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Return non-zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
                                     uintptr_t searched_pc)
{
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block. */
        env->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag. */
        env->can_do_io = 0;
    }

    /* find opc index corresponding to searched_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    env->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(tb, env, retaddr);
        return true;
    }
    return false;
}

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

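/* Walk the multi-level map for 'index' (a target page number): the top
   V_L1_BITS index into l1_map, each further level consumes L2_BITS, and
   the bottom level is an array of PageDesc.  If 'alloc' is set, missing
   intermediate tables are allocated on the way down; otherwise NULL is
   returned as soon as a level is absent. */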
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run. Revisit this. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here. */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer. This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TBs live. */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb. */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
    (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
     ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer. */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments. */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default. */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

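/* Three allocation strategies follow: a static buffer in BSS (user mode),
   an mmap'ed region whose placement can be constrained per host, and a
   plain g_malloc fallback.  In all three cases the resulting memory is
   readable, writable and executable, either via the mmap protection
   flags or via map_exec(). */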
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file. */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable. We're more likely to get
       an address near the main executable if we let the kernel
       choose the address. */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel. */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory. */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf) {
        map_exec(buf, tcg_ctx.code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer. This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range. It also means
       that we don't need to mark (additional) portions of the data segment
       as executable. */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(env1, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

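/* Unlink 'tb' from the singly linked physical-hash chain headed at *ptb;
   the caller guarantees that the TB is present on the chain. */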
static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

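/* List-pointer tagging: the per-page TB lists store the page slot (0 or 1)
   in the low two bits of each TranslationBlock pointer, and the jump lists
   additionally use tag 2 to mark the list head (jmp_first).  Traversals
   therefore mask the tag off with '& ~3' before dereferencing. */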
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb) {
            env->tb_jmp_cache[h] = NULL;
        }
    }

    /* remove this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* remove any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

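/* Set bits [start, start + len) in the byte array 'tab'; 'start' and
   'len' are measured in bits. */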
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

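/* Build p->code_bitmap: one bit per byte of the page, set where some TB's
   guest code lies.  tb_invalidate_phys_page_fast() consults it so that
   writes which miss all translated code avoid a full invalidation pass. */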
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

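/* Translate one guest basic block: allocate a TB (flushing the whole
   buffer first if we have run out of room), generate host code into it,
   then link it into the physical hash table and the per-page lists via
   tb_link_page().  A TB that crosses a page boundary is also registered
   against its second page. */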
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tc_ptr = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end). NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

990
991/*
992 * Invalidate all TBs which intersect with the target physical address range
993 * [start;end[. NOTE: start and end must refer to the *same* physical page.
994 * 'is_cpu_write_access' should be true if called from a real cpu write
995 * access: the virtual CPU will exit the current TB if code is modified inside
996 * this TB.
997 */
998void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
999 int is_cpu_write_access)
1000{
1001 TranslationBlock *tb, *tb_next, *saved_tb;
4917cf44
AF
1002 CPUState *cpu = current_cpu;
1003#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
1004 CPUArchState *env = NULL;
1005#endif
5b6dd868
BS
1006 tb_page_addr_t tb_start, tb_end;
1007 PageDesc *p;
1008 int n;
1009#ifdef TARGET_HAS_PRECISE_SMC
1010 int current_tb_not_found = is_cpu_write_access;
1011 TranslationBlock *current_tb = NULL;
1012 int current_tb_modified = 0;
1013 target_ulong current_pc = 0;
1014 target_ulong current_cs_base = 0;
1015 int current_flags = 0;
1016#endif /* TARGET_HAS_PRECISE_SMC */
1017
1018 p = page_find(start >> TARGET_PAGE_BITS);
1019 if (!p) {
1020 return;
1021 }
1022 if (!p->code_bitmap &&
1023 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1024 is_cpu_write_access) {
1025 /* build code bitmap */
1026 build_page_bitmap(p);
1027 }
4917cf44
AF
1028#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
1029 if (cpu != NULL) {
1030 env = cpu->env_ptr;
d77953b9 1031 }
4917cf44 1032#endif
5b6dd868
BS
1033
1034 /* we remove all the TBs in the range [start, end[ */
1035 /* XXX: see if in some cases it could be faster to invalidate all
1036 the code */
1037 tb = p->first_tb;
1038 while (tb != NULL) {
1039 n = (uintptr_t)tb & 3;
1040 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1041 tb_next = tb->page_next[n];
1042 /* NOTE: this is subtle as a TB may span two physical pages */
1043 if (n == 0) {
1044 /* NOTE: tb_end may be after the end of the page, but
1045 it is not a problem */
1046 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1047 tb_end = tb_start + tb->size;
1048 } else {
1049 tb_start = tb->page_addr[1];
1050 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1051 }
1052 if (!(tb_end <= start || tb_start >= end)) {
1053#ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb_not_found) {
1055 current_tb_not_found = 0;
1056 current_tb = NULL;
1057 if (env->mem_io_pc) {
1058 /* now we have a real cpu fault */
1059 current_tb = tb_find_pc(env->mem_io_pc);
1060 }
1061 }
1062 if (current_tb == tb &&
1063 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1064 /* If we are modifying the current TB, we must stop
1065 its execution. We could be more precise by checking
1066 that the modification is after the current PC, but it
1067 would require a specialized function to partially
1068 restore the CPU state */
1069
1070 current_tb_modified = 1;
a8a826a3 1071 cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
5b6dd868
BS
1072 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1073 &current_flags);
1074 }
1075#endif /* TARGET_HAS_PRECISE_SMC */
1076 /* we need to do that to handle the case where a signal
1077 occurs while doing tb_phys_invalidate() */
1078 saved_tb = NULL;
d77953b9
AF
1079 if (cpu != NULL) {
1080 saved_tb = cpu->current_tb;
1081 cpu->current_tb = NULL;
5b6dd868
BS
1082 }
1083 tb_phys_invalidate(tb, -1);
d77953b9
AF
1084 if (cpu != NULL) {
1085 cpu->current_tb = saved_tb;
c3affe56
AF
1086 if (cpu->interrupt_request && cpu->current_tb) {
1087 cpu_interrupt(cpu, cpu->interrupt_request);
5b6dd868
BS
1088 }
1089 }
1090 }
1091 tb = tb_next;
1092 }
1093#if !defined(CONFIG_USER_ONLY)
1094 /* if no code remaining, no need to continue to use slow writes */
1095 if (!p->first_tb) {
1096 invalidate_page_bitmap(p);
1097 if (is_cpu_write_access) {
1098 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1099 }
1100 }
1101#endif
1102#ifdef TARGET_HAS_PRECISE_SMC
1103 if (current_tb_modified) {
1104 /* we generate a block containing just the instruction
1105 modifying the memory. It will ensure that it cannot modify
1106 itself */
d77953b9 1107 cpu->current_tb = NULL;
5b6dd868
BS
1108 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1109 cpu_resume_from_signal(env, NULL);
1110 }
1111#endif
1112}
1113
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. This ensures that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
            tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
                                 tcg_ctx.code_gen_buffer_size));
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
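/* Invalidate any TB covering the given guest physical address, e.g. when
   a breakpoint is inserted or removed while TARGET_HAS_ICE is defined. */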
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUArchState *env)
{
    TranslationBlock *tb;

    tb = tb_find_pc(env->mem_io_pc);
    if (!tb) {
        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                  (void *)env->mem_io_pc);
    }
    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
    tb_phys_invalidate(tb, -1);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    CPUArchState *env = cpu->env_ptr;
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(env, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

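/* Check that the guest range [start, start + len) has at least the
   protections given in 'flags'; a page that was made read-only because
   it holds translated code is unprotected on demand.  Returns 0 on
   success, -1 on failure. */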
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */