/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
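
/*
 * Illustrative example of the geometry produced by the macros above (the
 * concrete numbers are an assumption, not taken from any particular build):
 * with L1_MAP_ADDR_SPACE_BITS = 42, TARGET_PAGE_BITS = 12 and V_L2_BITS = 10,
 * 30 bits of page index remain; 30 % 10 == 0, so V_L1_BITS_REM is 0 (< 4) and
 * V_L1_BITS becomes 10, giving a 1024-entry l1_map with two further
 * 1024-entry levels below it.
 */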

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_real_host_page_mask;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

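/*
 * tb_lock/tb_unlock/tb_lock_reset above only do real work for user-mode
 * emulation, where every guest thread may translate code concurrently and
 * must serialise access to the translation structures; in system-mode
 * emulation TCG runs on a single thread here, so the helpers compile to
 * no-ops.
 */
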
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

d19893da 173/* return non zero if the very first instruction is invalid so that
75692087
PB
174 * the virtual CPU can trigger an exception.
175 *
176 * '*gen_code_size_ptr' contains the size of the generated code (host
177 * code).
178 *
179 * Called with mmap_lock held for user-mode emulation.
180 */
9349b4f9 181int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
d19893da 182{
57fec1fe 183 TCGContext *s = &tcg_ctx;
1813e175 184 tcg_insn_unit *gen_code_buf;
d19893da 185 int gen_code_size;
57fec1fe
FB
186#ifdef CONFIG_PROFILER
187 int64_t ti;
188#endif
189
190#ifdef CONFIG_PROFILER
b67d9a52
FB
191 s->tb_count1++; /* includes aborted translations because of
192 exceptions */
57fec1fe
FB
193 ti = profile_getclock();
194#endif
195 tcg_func_start(s);
d19893da 196
2cfc5f17
TS
197 gen_intermediate_code(env, tb);
198
6db8b538
AB
199 trace_translate_block(tb, tb->pc, tb->tc_ptr);
200
ec6338ba 201 /* generate machine code */
57fec1fe 202 gen_code_buf = tb->tc_ptr;
ec6338ba
FB
203 tb->tb_next_offset[0] = 0xffff;
204 tb->tb_next_offset[1] = 0xffff;
57fec1fe 205 s->tb_next_offset = tb->tb_next_offset;
4cbb86e1 206#ifdef USE_DIRECT_JUMP
57fec1fe
FB
207 s->tb_jmp_offset = tb->tb_jmp_offset;
208 s->tb_next = NULL;
d19893da 209#else
57fec1fe
FB
210 s->tb_jmp_offset = NULL;
211 s->tb_next = tb->tb_next;
d19893da 212#endif
57fec1fe
FB
213
214#ifdef CONFIG_PROFILER
b67d9a52
FB
215 s->tb_count++;
216 s->interm_time += profile_getclock() - ti;
217 s->code_time -= profile_getclock();
57fec1fe 218#endif
54604f74 219 gen_code_size = tcg_gen_code(s, gen_code_buf);
d19893da 220 *gen_code_size_ptr = gen_code_size;
57fec1fe 221#ifdef CONFIG_PROFILER
b67d9a52
FB
222 s->code_time += profile_getclock();
223 s->code_in_len += tb->size;
224 s->code_out_len += gen_code_size;
57fec1fe
FB
225#endif
226
d19893da 227#ifdef DEBUG_DISAS
8fec2b8c 228 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
1813e175
RH
229 qemu_log("OUT: [size=%d]\n", gen_code_size);
230 log_disas(tb->tc_ptr, gen_code_size);
93fcfe39 231 qemu_log("\n");
31b1a7b4 232 qemu_log_flush();
d19893da
FB
233 }
234#endif
235 return 0;
236}
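
/*
 * cpu_gen_code() above is a two-step pipeline: gen_intermediate_code() turns
 * the guest instruction stream into TCG ops inside tcg_ctx, and
 * tcg_gen_code() then lowers those ops into host machine code written at
 * tb->tc_ptr.  tb_next_offset[] starts out as 0xffff ("no direct jump") and
 * is filled in when the block ends in a goto_tb, so the jump can later be
 * patched to chain TBs together.
 */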

/* The cpu state corresponding to 'searched_pc' is restored.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    gen_intermediate_code_pc(env, tb);

    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
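
/*
 * Rather than storing a host-PC-to-guest-PC map with every TB, the function
 * above re-runs the front end in "search_pc" mode: the block is translated
 * again into the same TCG context, tcg_gen_code_search_pc() stops once the
 * host offset searched_pc - tc_ptr is reached, and the index of the guest
 * instruction that produced that host code is used by restore_state_to_opc()
 * to rebuild the CPU state (PC, flags, icount).
 */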

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

#ifdef _WIN32
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

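/*
 * The walk above consumes V_L1_BITS of the page index at the top level and
 * V_L2_BITS at each level below it, so a lookup touches at most
 * V_L1_SHIFT / V_L2_BITS + 1 arrays.  Readers use atomic_rcu_read() so that
 * lookups can run concurrently with a writer populating a level with
 * atomic_rcu_set().
 */
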
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

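/*
 * With the sizing above, for example, a system-mode guest started with
 * 1 GiB of RAM and no explicit tb_size requests 256 MiB (ram_size / 4),
 * which is then clamped between MIN_ and MAX_CODE_GEN_BUFFER_SIZE for the
 * host; when the static buffer is in use, a zero tb_size falls back to
 * DEFAULT_CODE_GEN_BUFFER_SIZE instead.
 */
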
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
# ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
# elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
# else
    start = 0x08000000ul;
# endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
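
/*
 * Three allocation strategies are compiled above and selected at build time:
 * a static BSS array (user-mode), an anonymous executable mmap (the listed
 * Unix-like hosts), or a plain heap allocation made executable with
 * map_exec() as a last resort.  All of them honour the MIPS restriction that
 * direct J/JAL branches cannot cross a 256MB-aligned boundary.
 */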

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

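/*
 * tb_flush() above throws away every translation at once; it is triggered
 * from tb_gen_code() when tb_alloc() fails, i.e. when either the TB
 * descriptor array or the code buffer is exhausted, and everything is
 * simply retranslated on demand afterwards.
 */
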
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

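/*
 * The page_next[], jmp_next[] and jmp_first pointers used above rely on a
 * small tagged-pointer encoding: the low two bits say which slot (0 or 1)
 * of the pointed-to TB continues the list, and the value 2 marks the list
 * head stored in jmp_first.  Hence the recurring "(uintptr_t)tb & 3" /
 * "& ~3" pairs when walking these lists.
 */
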
/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

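/*
 * The bitmap built above has one bit per byte of the guest page that is
 * covered by translated code.  tb_invalidate_phys_page_fast() consults it so
 * that a write which only touches data bytes on a page that also contains
 * code does not force a TB invalidation; the bitmap is only built once a
 * page has seen SMC_BITMAP_USE_THRESHOLD write notifications.
 */
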
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount) {
        cflags |= CF_USE_ICOUNT;
    }
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

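/*
 * A TB that straddles a guest page boundary is registered on both physical
 * pages (phys_pc and phys_page2) by tb_link_page() above, so that a write to
 * either page will find and invalidate it; single-page TBs leave
 * page_addr[1] set to -1.
 */
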
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end may refer to *different* physical pages.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

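/*
 * The TARGET_HAS_PRECISE_SMC path above (defined for i386 guests) covers the
 * case where the store that triggered the invalidation comes from the TB
 * being destroyed: the guest state is rolled back to the faulting
 * instruction, a one-instruction TB (cflags count = 1) is generated for it,
 * and execution resumes there so the block can no longer overwrite itself
 * mid-run.
 */
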
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

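/*
 * Two different write-protection mechanisms are used in tb_alloc_page()
 * above: user-mode emulation mprotect()s the host page read-only so that
 * guest writes trap with SIGSEGV and reach page_unprotect(), while
 * system-mode emulation marks the page in the softmmu TLB via
 * tlb_protect_code() so writes take the slow path and end up in
 * tb_invalidate_phys_page_fast().
 */
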
/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

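/*
 * The binary search above works because tbs[] is filled in allocation order
 * and each TB's host code is carved sequentially out of code_gen_buffer, so
 * tc_ptr values increase monotonically with the array index; the lookup maps
 * a host return address back to the TB that contains it.
 */
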
#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        int flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask;

    old_mask = cpu->interrupt_request;
    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        cpu->icount_decr.u16.high = 0xffff;
        if (!cpu->can_do_io
            && (mask & ~old_mask) != 0) {
            cpu_abort(cpu, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu->tcg_exit_req = 1;
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

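/*
 * With -icount, an MMIO access must be the last instruction of its TB so
 * the instruction counter is exact when the device is touched.  The
 * function above therefore retranslates the block with
 * cflags = n | CF_LAST_IO, i.e. capped at the n instructions up to and
 * including the I/O access, and the CPU restarts at the same guest PC
 * inside the shorter block.
 */
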
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

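/*
 * The walk above emits one callback per maximal run of pages that share the
 * same protection flags: walk_memory_regions_end() closes the current run
 * when the flags change (or at an unmapped hole) and opens a new one, so
 * 'fn' sees coalesced regions rather than individual target pages.
 */
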
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

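/*
 * page_unprotect() below is the user-mode counterpart of the SMC machinery:
 * a guest write to a page that tb_alloc_page() made read-only raises
 * SIGSEGV, the signal handler calls this function, every TB on the host
 * page is invalidated, PAGE_WRITE is restored with mprotect(), and the
 * faulting store is restarted.
 */
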
/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */