/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
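
/*
 * Illustrative note (numbers are an assumed example, not a build
 * configuration): with a user-mode guest where
 * TARGET_VIRT_ADDR_SPACE_BITS == 47 and TARGET_PAGE_BITS == 12, there
 * are 35 page-index bits to cover.  35 % V_L2_BITS == 5, so
 * V_L1_BITS == 5, V_L1_SIZE == 32 and V_L1_SHIFT == 30; the remaining
 * 30 bits are resolved by three 10-bit (1024-entry) levels below the
 * root.  A page index is then split as:
 *
 *     index >> 30            -> slot in l1_map
 *     (index >> 20) & 0x3ff  -> slot in the level-2 table
 *     (index >> 10) & 0x3ff  -> slot in the level-3 table
 *     index & 0x3ff          -> PageDesc within the leaf array
 */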

uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Return non-zero if the very first instruction is invalid, so that
 * the virtual CPU can trigger an exception.
 *
 * '*gen_code_size_ptr' contains the size of the generated code (host
 * code).
 *
 * Called with mmap_lock held for user-mode emulation.
 */
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to searched_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr) {
        return -1;
    }

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0) {
        return -1;
    }
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
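
/*
 * Illustrative note: state restoration works by retranslating the TB.
 * tcg_gen_code_search_pc() re-runs code generation for this block and
 * stops once the host-code offset (searched_pc - tc_ptr) is reached,
 * returning the index of the TCG op that produced that host code.
 * Walking backwards over gen_opc_instr_start[] then finds the first op
 * of the guest instruction, and restore_state_to_opc() copies the
 * recorded guest PC (and target-specific fields) back into the CPU
 * state.  This is why translation must be deterministic: the second
 * pass has to reproduce the first one exactly.
 */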

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

#ifdef _WIN32
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1, new levels are allocated as needed.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
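
/*
 * Illustrative note (an assumption about the atomic_rcu_* helpers, not
 * stated in this file): readers walk this radix tree without taking
 * tb_lock, so each level is published with atomic_rcu_set() only after
 * the new table has been fully zero-initialised by g_new0(), and is
 * loaded with atomic_rcu_read().  A concurrent lookup therefore sees
 * either NULL or a complete table, never a half-initialised one;
 * writers are still serialised by the mmap_lock in user mode.
 */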

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
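
/*
 * Illustrative note (worked example): the XOR in cross_256mb() flags
 * any pair of addresses whose bits 28..31 differ, i.e. addresses in
 * different naturally-aligned 256MB regions.  For addr = 0x0fff0000
 * and size = 0x00020000, addr + size = 0x10010000 and
 * 0x0fff0000 ^ 0x10010000 = 0x1ffe0000, which is non-zero under the
 * 0xf0000000 mask: the buffer crosses a 256MB boundary, which MIPS
 * J/JAL (whose target replaces only the low 28 bits of the PC) could
 * not jump across.
 */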

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
# ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
# elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
# else
    start = 0x08000000ul;
# endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            g_free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            g_free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
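
/*
 * Illustrative note (sketch of the resulting layout, not to scale):
 *
 *   [ translated code ... | worst-case TB slack | prologue (1024 B) ]
 *   ^ code_gen_buffer     ^ code_gen_buffer_max_size
 *
 * The slack of TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes ensures that a
 * translation started below code_gen_buffer_max_size cannot run off
 * the end of the buffer; tb_alloc() fails once code_gen_ptr crosses
 * that limit, and tb_gen_code() responds with a global tb_flush().
 */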

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
                tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
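
/*
 * Illustrative note: the jump lists reuse the two low bits of a
 * TranslationBlock pointer as a tag, which is safe because TBs are at
 * least 4-byte aligned.  An entry (tb | n) with n == 0 or 1 means
 * "jump slot n of tb"; (tb | 2) marks the entry stored in
 * tb->jmp_first and terminates the walk.  Each TB thus keeps, through
 * jmp_first/jmp_next, a circular list of every (TB, slot) pair that
 * was patched to branch into it, so tb_phys_invalidate() can find and
 * un-patch all callers.
 */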

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
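
/*
 * Illustrative note: code_bitmap has one bit per byte of the guest
 * page; a set bit means "some TB was translated from this byte".  The
 * bitmap is only built once code_write_count crosses
 * SMC_BITMAP_USE_THRESHOLD, i.e. for pages that keep being written,
 * so that tb_invalidate_phys_page_fast() can cheaply prove that a
 * store touched no translated code and skip the slow invalidation
 * path.
 */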

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount) {
        cflags |= CF_USE_ICOUNT;
    }
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution.  We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory.  It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
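
/*
 * Illustrative note: because 'start' is len-aligned and len <= 8, the
 * len bitmap bits of interest never straddle a word of
 * p->code_bitmap.  The code loads the single word containing bit nr,
 * shifts it so that bit 0 corresponds to the first written byte, and
 * tests the low bits against (1 << len) - 1: any set bit means the
 * store overlapped translated code and the slow path must run.
 */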

#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution.  We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory.  It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected.  So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr.  Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
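
/*
 * Illustrative note: the binary search is valid because TBs are carved
 * out of code_gen_buffer in allocation order, so tbs[].tc_ptr is
 * monotonically increasing.  When the loop exits without an exact hit,
 * tbs[m_max] is the last TB whose tc_ptr lies below the searched
 * address, i.e. the block containing it.  This is how a host return
 * address (from a signal handler or an I/O helper) is mapped back to
 * a guest TB.
 */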

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        int flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
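
/*
 * Illustrative note: two hash ranges are cleared because tb_jmp_cache
 * is indexed by a TB's *starting* pc, yet a TB may extend into the
 * following page.  A TB beginning on the page just below 'addr' can
 * still contain code on the flushed page, so the cache lines hashed
 * from the preceding page are dropped as well.
 */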

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                             target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do this before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */