/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "translate-all.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
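
/* A worked example of the geometry above (illustrative only; the real
   values depend on the host and target configuration):

     L1_MAP_ADDR_SPACE_BITS = 32, TARGET_PAGE_BITS = 12, V_L2_BITS = 10
       V_L1_BITS_REM = (32 - 12) % 10 = 0  ->  V_L1_BITS = 0 + 10 = 10
       V_L1_SIZE     = 1 << 10 = 1024 entries in l1_map
       V_L1_SHIFT    = 32 - 12 - 10 = 10   ->  one further 10-bit level

     L1_MAP_ADDR_SPACE_BITS = 48, TARGET_PAGE_BITS = 12, V_L2_BITS = 10
       V_L1_BITS_REM = (48 - 12) % 10 = 6  ->  V_L1_BITS = 6
       V_L1_SIZE     = 1 << 6 = 64 entries in l1_map
       V_L1_SHIFT    = 48 - 12 - 6 = 30    ->  three further 10-bit levels

   page_find_alloc() below walks exactly these levels: the top V_L1_BITS of
   the page index select an l1_map slot, each intermediate level consumes
   V_L2_BITS, and the final level is an array of PageDesc. */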

/* code generation context */
TCGContext tcg_ctx;

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
136
d19893da 137/* return non zero if the very first instruction is invalid so that
5fafdf24 138 the virtual CPU can trigger an exception.
139
140 '*gen_code_size_ptr' contains the size of the generated code (host
141 code).
142*/
9349b4f9 143int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
d19893da 144{
57fec1fe 145 TCGContext *s = &tcg_ctx;
1813e175 146 tcg_insn_unit *gen_code_buf;
d19893da 147 int gen_code_size;
148#ifdef CONFIG_PROFILER
149 int64_t ti;
150#endif
151
152#ifdef CONFIG_PROFILER
153 s->tb_count1++; /* includes aborted translations because of
154 exceptions */
155 ti = profile_getclock();
156#endif
157 tcg_func_start(s);
d19893da 158
159 gen_intermediate_code(env, tb);
160
ec6338ba 161 /* generate machine code */
57fec1fe 162 gen_code_buf = tb->tc_ptr;
163 tb->tb_next_offset[0] = 0xffff;
164 tb->tb_next_offset[1] = 0xffff;
57fec1fe 165 s->tb_next_offset = tb->tb_next_offset;
4cbb86e1 166#ifdef USE_DIRECT_JUMP
167 s->tb_jmp_offset = tb->tb_jmp_offset;
168 s->tb_next = NULL;
d19893da 169#else
170 s->tb_jmp_offset = NULL;
171 s->tb_next = tb->tb_next;
d19893da 172#endif
173
174#ifdef CONFIG_PROFILER
175 s->tb_count++;
176 s->interm_time += profile_getclock() - ti;
177 s->code_time -= profile_getclock();
57fec1fe 178#endif
54604f74 179 gen_code_size = tcg_gen_code(s, gen_code_buf);
d19893da 180 *gen_code_size_ptr = gen_code_size;
57fec1fe 181#ifdef CONFIG_PROFILER
182 s->code_time += profile_getclock();
183 s->code_in_len += tb->size;
184 s->code_out_len += gen_code_size;
185#endif
186
d19893da 187#ifdef DEBUG_DISAS
8fec2b8c 188 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
189 qemu_log("OUT: [size=%d]\n", gen_code_size);
190 log_disas(tb->tc_ptr, gen_code_size);
93fcfe39 191 qemu_log("\n");
31b1a7b4 192 qemu_log_flush();
193 }
194#endif
195 return 0;
196}
197
5fafdf24 198/* The cpu state corresponding to 'searched_pc' is restored.
d19893da 199 */
74f10515 200static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
a8a826a3 201 uintptr_t searched_pc)
d19893da 202{
74f10515 203 CPUArchState *env = cpu->env_ptr;
204 TCGContext *s = &tcg_ctx;
205 int j;
6375e09e 206 uintptr_t tc_ptr;
207#ifdef CONFIG_PROFILER
208 int64_t ti;
209#endif
210
211#ifdef CONFIG_PROFILER
212 ti = profile_getclock();
213#endif
214 tcg_func_start(s);
d19893da 215
2cfc5f17 216 gen_intermediate_code_pc(env, tb);
3b46e624 217
218 if (use_icount) {
219 /* Reset the cycle counter to the start of the block. */
28ecfd7a 220 cpu->icount_decr.u16.low += tb->icount;
2e70f6ef 221 /* Clear the IO flag. */
99df7dce 222 cpu->can_do_io = 0;
223 }
224
d19893da 225 /* find opc index corresponding to search_pc */
6375e09e 226 tc_ptr = (uintptr_t)tb->tc_ptr;
227 if (searched_pc < tc_ptr)
228 return -1;
229
230 s->tb_next_offset = tb->tb_next_offset;
231#ifdef USE_DIRECT_JUMP
232 s->tb_jmp_offset = tb->tb_jmp_offset;
233 s->tb_next = NULL;
234#else
235 s->tb_jmp_offset = NULL;
236 s->tb_next = tb->tb_next;
237#endif
238 j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
239 searched_pc - tc_ptr);
240 if (j < 0)
241 return -1;
d19893da 242 /* now find start of instruction before */
ab1103de 243 while (s->gen_opc_instr_start[j] == 0) {
d19893da 244 j--;
ab1103de 245 }
28ecfd7a 246 cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
3b46e624 247
e87b7cb0 248 restore_state_to_opc(env, tb, j);
249
250#ifdef CONFIG_PROFILER
251 s->restore_time += profile_getclock() - ti;
252 s->restore_count++;
57fec1fe 253#endif
254 return 0;
255}
5b6dd868 256
3f38f309 257bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
258{
259 TranslationBlock *tb;
260
261 tb = tb_find_pc(retaddr);
262 if (tb) {
74f10515 263 cpu_restore_state_from_tb(cpu, tb, retaddr);
264 return true;
265 }
266 return false;
267}
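
/* Note: callers typically pass the host return address of the faulting
   translated code (for example obtained via GETPC() in a helper, or taken
   from the signal frame), so the guest CPU state can be rebuilt as of the
   exact guest instruction that was executing. */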
268
269#ifdef _WIN32
270static inline void map_exec(void *addr, long size)
271{
272 DWORD old_protect;
273 VirtualProtect(addr, size,
274 PAGE_EXECUTE_READWRITE, &old_protect);
275}
276#else
277static inline void map_exec(void *addr, long size)
278{
279 unsigned long start, end, page_size;
280
281 page_size = getpagesize();
282 start = (unsigned long)addr;
283 start &= ~(page_size - 1);
284
285 end = (unsigned long)addr + size;
286 end += page_size - 1;
287 end &= ~(page_size - 1);
288
289 mprotect((void *)start, end - start,
290 PROT_READ | PROT_WRITE | PROT_EXEC);
291}
292#endif
293
47c16ed5 294void page_size_init(void)
295{
296 /* NOTE: we can always suppose that qemu_host_page_size >=
297 TARGET_PAGE_SIZE */
298#ifdef _WIN32
47c16ed5 299 SYSTEM_INFO system_info;
5b6dd868 300
301 GetSystemInfo(&system_info);
302 qemu_real_host_page_size = system_info.dwPageSize;
303#else
304 qemu_real_host_page_size = getpagesize();
305#endif
306 if (qemu_host_page_size == 0) {
307 qemu_host_page_size = qemu_real_host_page_size;
308 }
309 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
310 qemu_host_page_size = TARGET_PAGE_SIZE;
311 }
312 qemu_host_page_mask = ~(qemu_host_page_size - 1);
47c16ed5 313}
5b6dd868 314
315static void page_init(void)
316{
317 page_size_init();
318#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
319 {
320#ifdef HAVE_KINFO_GETVMMAP
321 struct kinfo_vmentry *freep;
322 int i, cnt;
323
324 freep = kinfo_getvmmap(getpid(), &cnt);
325 if (freep) {
326 mmap_lock();
327 for (i = 0; i < cnt; i++) {
328 unsigned long startaddr, endaddr;
329
330 startaddr = freep[i].kve_start;
331 endaddr = freep[i].kve_end;
332 if (h2g_valid(startaddr)) {
333 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
334
335 if (h2g_valid(endaddr)) {
336 endaddr = h2g(endaddr);
337 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
338 } else {
339#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
340 endaddr = ~0ul;
341 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
342#endif
343 }
344 }
345 }
346 free(freep);
347 mmap_unlock();
348 }
349#else
350 FILE *f;
351
352 last_brk = (unsigned long)sbrk(0);
353
354 f = fopen("/compat/linux/proc/self/maps", "r");
355 if (f) {
356 mmap_lock();
357
358 do {
359 unsigned long startaddr, endaddr;
360 int n;
361
362 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
363
364 if (n == 2 && h2g_valid(startaddr)) {
365 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
366
367 if (h2g_valid(endaddr)) {
368 endaddr = h2g(endaddr);
369 } else {
370 endaddr = ~0ul;
371 }
372 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
373 }
374 } while (!feof(f));
375
376 fclose(f);
377 mmap_unlock();
378 }
379#endif
380 }
381#endif
382}
383
384static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
385{
386 PageDesc *pd;
387 void **lp;
388 int i;
389
390#if defined(CONFIG_USER_ONLY)
391 /* We can't use g_malloc because it may recurse into a locked mutex. */
392# define ALLOC(P, SIZE) \
393 do { \
394 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
395 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
396 } while (0)
397#else
398# define ALLOC(P, SIZE) \
399 do { P = g_malloc0(SIZE); } while (0)
400#endif
401
402 /* Level 1. Always allocated. */
403 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
404
405 /* Level 2..N-1. */
03f49957 406 for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
407 void **p = *lp;
408
409 if (p == NULL) {
410 if (!alloc) {
411 return NULL;
412 }
03f49957 413 ALLOC(p, sizeof(void *) * V_L2_SIZE);
414 *lp = p;
415 }
416
03f49957 417 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
418 }
419
420 pd = *lp;
421 if (pd == NULL) {
422 if (!alloc) {
423 return NULL;
424 }
03f49957 425 ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
426 *lp = pd;
427 }
428
429#undef ALLOC
430
03f49957 431 return pd + (index & (V_L2_SIZE - 1));
432}
433
434static inline PageDesc *page_find(tb_page_addr_t index)
435{
436 return page_find_alloc(index, 0);
437}
438
439#if !defined(CONFIG_USER_ONLY)
440#define mmap_lock() do { } while (0)
441#define mmap_unlock() do { } while (0)
442#endif
443
444#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  That will change once a dedicated libc is used. */
447/* ??? 64-bit hosts ought to have no problem mmaping data outside the
448 region in which the guest needs to run. Revisit this. */
449#define USE_STATIC_CODE_GEN_BUFFER
450#endif
451
452/* ??? Should configure for this, not list operating systems here. */
453#if (defined(__linux__) \
454 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
455 || defined(__DragonFly__) || defined(__OpenBSD__) \
456 || defined(__NetBSD__))
457# define USE_MMAP
458#endif
459
460/* Minimum size of the code gen buffer. This number is randomly chosen,
461 but not so small that we can't have a fair number of TB's live. */
462#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
463
464/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
465 indicated, this is constrained by the range of direct branches on the
466 host cpu, as used by the TCG implementation of goto_tb. */
467#if defined(__x86_64__)
468# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
469#elif defined(__sparc__)
470# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
471#elif defined(__aarch64__)
472# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
473#elif defined(__arm__)
474# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
475#elif defined(__s390x__)
476 /* We have a +- 4GB range on the branches; leave some slop. */
477# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
478#elif defined(__mips__)
479 /* We have a 256MB branch region, but leave room to make sure the
480 main executable is also within that region. */
481# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
482#else
483# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
484#endif
485
486#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
487
488#define DEFAULT_CODE_GEN_BUFFER_SIZE \
489 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
490 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
491
492static inline size_t size_code_gen_buffer(size_t tb_size)
493{
494 /* Size the buffer. */
495 if (tb_size == 0) {
496#ifdef USE_STATIC_CODE_GEN_BUFFER
497 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
498#else
499 /* ??? Needs adjustments. */
500 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
501 static buffer, we could size this on RESERVED_VA, on the text
502 segment size of the executable, or continue to use the default. */
503 tb_size = (unsigned long)(ram_size / 4);
504#endif
505 }
506 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
507 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
508 }
509 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
510 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
511 }
0b0d3320 512 tcg_ctx.code_gen_buffer_size = tb_size;
513 return tb_size;
514}
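
/* Illustrative sizing results, given the limits defined above:
     tb_size == 0 with USE_STATIC_CODE_GEN_BUFFER  ->  32 MB default
     tb_size == 256 KB                             ->  clamped up to 1 MB
     tb_size == 8 GB on an x86-64 host             ->  clamped down to 2 GB */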
515
516#ifdef __mips__
517/* In order to use J and JAL within the code_gen_buffer, we require
518 that the buffer not cross a 256MB boundary. */
519static inline bool cross_256mb(void *addr, size_t size)
520{
521 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
522}
523
524/* We weren't able to allocate a buffer without crossing that boundary,
525 so make do with the larger portion of the buffer that doesn't cross.
526 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
527static inline void *split_cross_256mb(void *buf1, size_t size1)
528{
529 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
530 size_t size2 = buf1 + size1 - buf2;
531
532 size1 = buf2 - buf1;
533 if (size1 < size2) {
534 size1 = size2;
535 buf1 = buf2;
536 }
537
538 tcg_ctx.code_gen_buffer_size = size1;
539 return buf1;
540}
541#endif
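
/* Worked example for the 256MB check above (illustrative addresses):
     buf = 0x0ff00000, size = 0x00400000  ->  end = 0x10300000
     (buf ^ end) & 0xf0000000 = 0x10000000 != 0, so the buffer crosses.
   split_cross_256mb() then keeps the larger half on one side of the
   boundary -- here [0x10000000, 0x10300000), i.e. 3 MB -- and shrinks
   tcg_ctx.code_gen_buffer_size to match. */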
542
543#ifdef USE_STATIC_CODE_GEN_BUFFER
544static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
545 __attribute__((aligned(CODE_GEN_ALIGN)));
546
547static inline void *alloc_code_gen_buffer(void)
548{
549 void *buf = static_code_gen_buffer;
550#ifdef __mips__
551 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
552 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
553 }
554#endif
555 map_exec(buf, tcg_ctx.code_gen_buffer_size);
556 return buf;
557}
558#elif defined(USE_MMAP)
559static inline void *alloc_code_gen_buffer(void)
560{
561 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
562 uintptr_t start = 0;
563 void *buf;
564
565 /* Constrain the position of the buffer based on the host cpu.
566 Note that these addresses are chosen in concert with the
567 addresses assigned in the relevant linker script file. */
568# if defined(__PIE__) || defined(__PIC__)
569 /* Don't bother setting a preferred location if we're building
570 a position-independent executable. We're more likely to get
571 an address near the main executable if we let the kernel
572 choose the address. */
573# elif defined(__x86_64__) && defined(MAP_32BIT)
574 /* Force the memory down into low memory with the executable.
575 Leave the choice of exact location with the kernel. */
576 flags |= MAP_32BIT;
577 /* Cannot expect to map more than 800MB in low memory. */
578 if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
579 tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
580 }
581# elif defined(__sparc__)
582 start = 0x40000000ul;
583# elif defined(__s390x__)
584 start = 0x90000000ul;
585# elif defined(__mips__)
586 /* ??? We ought to more explicitly manage layout for softmmu too. */
587# ifdef CONFIG_USER_ONLY
588 start = 0x68000000ul;
589# elif _MIPS_SIM == _ABI64
590 start = 0x128000000ul;
591# else
592 start = 0x08000000ul;
593# endif
594# endif
595
0b0d3320 596 buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
5b6dd868 597 PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
598 if (buf == MAP_FAILED) {
599 return NULL;
600 }
601
602#ifdef __mips__
603 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
606 size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
607 void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
608 flags, -1, 0);
609 if (buf2 != MAP_FAILED) {
610 if (!cross_256mb(buf2, size1)) {
611 /* Success! Use the new buffer. */
612 munmap(buf, size1);
613 return buf2;
614 }
615 /* Failure. Work with what we had. */
616 munmap(buf2, size1);
617 }
618
619 /* Split the original buffer. Free the smaller half. */
620 buf2 = split_cross_256mb(buf, size1);
621 size2 = tcg_ctx.code_gen_buffer_size;
622 munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
623 return buf2;
624 }
625#endif
626
627 return buf;
628}
629#else
630static inline void *alloc_code_gen_buffer(void)
631{
0b0d3320 632 void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
5b6dd868 633
634 if (buf == NULL) {
635 return NULL;
636 }
637
638#ifdef __mips__
639 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
640 void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL &&
            !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            g_free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            g_free(buf2);
649 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
650 }
5b6dd868 651 }
652#endif
653
654 map_exec(buf, tcg_ctx.code_gen_buffer_size);
655 return buf;
656}
657#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
658
659static inline void code_gen_alloc(size_t tb_size)
660{
661 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
662 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
663 if (tcg_ctx.code_gen_buffer == NULL) {
664 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
665 exit(1);
666 }
667
668 qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
669 QEMU_MADV_HUGEPAGE);
670
671 /* Steal room for the prologue at the end of the buffer. This ensures
672 (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
673 from TB's to the prologue are going to be in range. It also means
674 that we don't need to mark (additional) portions of the data segment
675 as executable. */
676 tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
677 tcg_ctx.code_gen_buffer_size - 1024;
678 tcg_ctx.code_gen_buffer_size -= 1024;
5b6dd868 679
0b0d3320 680 tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
5b6dd868 681 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
682 tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
683 CODE_GEN_AVG_BLOCK_SIZE;
684 tcg_ctx.tb_ctx.tbs =
685 g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
686}
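
/* Rough sketch of the resulting buffer layout (not to scale):

     code_gen_buffer                                code_gen_buffer + size
     |<--------------- translated blocks --------------->|<- prologue ->|
                                                   1024 bytes at the end

   code_gen_buffer_max_size additionally keeps one worst-case TB's worth
   of space (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes) in reserve, so that a
   translation already in progress cannot run off the end of the buffer. */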
687
688/* Must be called before using the QEMU cpus. 'tb_size' is the size
689 (in bytes) allocated to the translation buffer. Zero means default
690 size. */
691void tcg_exec_init(unsigned long tb_size)
692{
693 cpu_gen_init();
694 code_gen_alloc(tb_size);
695 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
696 tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
697 page_init();
698#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
699 /* There's no guest base to take into account, so go ahead and
700 initialize the prologue now. */
701 tcg_prologue_init(&tcg_ctx);
702#endif
703}
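
/* A front end typically calls this once during start-up, for example
   tcg_exec_init(0) for the default buffer size, or
   tcg_exec_init(32 * 1024 * 1024) for an explicit 32 MB buffer
   (illustrative values; the actual call sites live outside this file). */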
704
705bool tcg_enabled(void)
706{
0b0d3320 707 return tcg_ctx.code_gen_buffer != NULL;
708}
709
710/* Allocate a new translation block. Flush the translation buffer if
711 too many translation blocks or too much generated code. */
712static TranslationBlock *tb_alloc(target_ulong pc)
713{
714 TranslationBlock *tb;
715
5e5f07e0 716 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
717 (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
718 tcg_ctx.code_gen_buffer_max_size) {
719 return NULL;
720 }
5e5f07e0 721 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
722 tb->pc = pc;
723 tb->cflags = 0;
724 return tb;
725}
726
727void tb_free(TranslationBlock *tb)
728{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
732 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
733 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
0b0d3320 734 tcg_ctx.code_gen_ptr = tb->tc_ptr;
5e5f07e0 735 tcg_ctx.tb_ctx.nb_tbs--;
736 }
737}
738
739static inline void invalidate_page_bitmap(PageDesc *p)
740{
741 if (p->code_bitmap) {
742 g_free(p->code_bitmap);
743 p->code_bitmap = NULL;
744 }
745 p->code_write_count = 0;
746}
747
748/* Set to NULL all the 'first_tb' fields in all PageDescs. */
749static void page_flush_tb_1(int level, void **lp)
750{
751 int i;
752
753 if (*lp == NULL) {
754 return;
755 }
756 if (level == 0) {
757 PageDesc *pd = *lp;
758
03f49957 759 for (i = 0; i < V_L2_SIZE; ++i) {
760 pd[i].first_tb = NULL;
761 invalidate_page_bitmap(pd + i);
762 }
763 } else {
764 void **pp = *lp;
765
03f49957 766 for (i = 0; i < V_L2_SIZE; ++i) {
767 page_flush_tb_1(level - 1, pp + i);
768 }
769 }
770}
771
772static void page_flush_tb(void)
773{
774 int i;
775
776 for (i = 0; i < V_L1_SIZE; i++) {
03f49957 777 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
778 }
779}
780
781/* flush all the translation blocks */
782/* XXX: tb_flush is currently not thread safe */
783void tb_flush(CPUArchState *env1)
784{
a47dddd7 785 CPUState *cpu = ENV_GET_CPU(env1);
786
787#if defined(DEBUG_FLUSH)
788 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
0b0d3320 789 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
5e5f07e0 790 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
0b0d3320 791 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
5e5f07e0 792 tcg_ctx.tb_ctx.nb_tbs : 0);
5b6dd868 793#endif
794 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
795 > tcg_ctx.code_gen_buffer_size) {
a47dddd7 796 cpu_abort(cpu, "Internal error: code buffer overflow\n");
5b6dd868 797 }
5e5f07e0 798 tcg_ctx.tb_ctx.nb_tbs = 0;
5b6dd868 799
bdc44640 800 CPU_FOREACH(cpu) {
8cd70437 801 memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
802 }
803
eb2535f4 804 memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
805 page_flush_tb();
806
0b0d3320 807 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
808 /* XXX: flush processor icache at this point if cache flush is
809 expensive */
5e5f07e0 810 tcg_ctx.tb_ctx.tb_flush_count++;
811}
812
813#ifdef DEBUG_TB_CHECK
814
815static void tb_invalidate_check(target_ulong address)
816{
817 TranslationBlock *tb;
818 int i;
819
820 address &= TARGET_PAGE_MASK;
821 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
823 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
824 address >= tb->pc + tb->size)) {
825 printf("ERROR invalidate: address=" TARGET_FMT_lx
826 " PC=%08lx size=%04x\n",
827 address, (long)tb->pc, tb->size);
828 }
829 }
830 }
831}
832
833/* verify that all the pages have correct rights for code */
834static void tb_page_check(void)
835{
836 TranslationBlock *tb;
837 int i, flags1, flags2;
838
839 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
840 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
841 tb = tb->phys_hash_next) {
842 flags1 = page_get_flags(tb->pc);
843 flags2 = page_get_flags(tb->pc + tb->size - 1);
844 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
845 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
846 (long)tb->pc, tb->size, flags1, flags2);
847 }
848 }
849 }
850}
851
852#endif
853
0c884d16 854static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
855{
856 TranslationBlock *tb1;
857
858 for (;;) {
859 tb1 = *ptb;
860 if (tb1 == tb) {
0c884d16 861 *ptb = tb1->phys_hash_next;
862 break;
863 }
0c884d16 864 ptb = &tb1->phys_hash_next;
865 }
866}
867
868static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
869{
870 TranslationBlock *tb1;
871 unsigned int n1;
872
873 for (;;) {
874 tb1 = *ptb;
875 n1 = (uintptr_t)tb1 & 3;
876 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
877 if (tb1 == tb) {
878 *ptb = tb1->page_next[n1];
879 break;
880 }
881 ptb = &tb1->page_next[n1];
882 }
883}
884
885static inline void tb_jmp_remove(TranslationBlock *tb, int n)
886{
887 TranslationBlock *tb1, **ptb;
888 unsigned int n1;
889
890 ptb = &tb->jmp_next[n];
891 tb1 = *ptb;
892 if (tb1) {
893 /* find tb(n) in circular list */
894 for (;;) {
895 tb1 = *ptb;
896 n1 = (uintptr_t)tb1 & 3;
897 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
898 if (n1 == n && tb1 == tb) {
899 break;
900 }
901 if (n1 == 2) {
902 ptb = &tb1->jmp_first;
903 } else {
904 ptb = &tb1->jmp_next[n1];
905 }
906 }
907 /* now we can suppress tb(n) from the list */
908 *ptb = tb->jmp_next[n];
909
910 tb->jmp_next[n] = NULL;
911 }
912}
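
/* Note on the encoding used by the jump lists above: the low two bits of
   each pointer are a tag, not part of the address.  Tags 0 and 1 mean "the
   TB reached through jmp_next[0] or jmp_next[1] of the pointed-to TB",
   while tag 2 marks the list head kept in jmp_first.  That is why the
   loops mask with ~3 before dereferencing and stop when n1 == 2. */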
913
914/* reset the jump entry 'n' of a TB so that it is not chained to
915 another TB */
916static inline void tb_reset_jump(TranslationBlock *tb, int n)
917{
918 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
919}
920
0c884d16 921/* invalidate one TB */
922void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
923{
182735ef 924 CPUState *cpu;
925 PageDesc *p;
926 unsigned int h, n1;
927 tb_page_addr_t phys_pc;
928 TranslationBlock *tb1, *tb2;
929
930 /* remove the TB from the hash list */
931 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
932 h = tb_phys_hash_func(phys_pc);
5e5f07e0 933 tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
934
935 /* remove the TB from the page list */
936 if (tb->page_addr[0] != page_addr) {
937 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
938 tb_page_remove(&p->first_tb, tb);
939 invalidate_page_bitmap(p);
940 }
941 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
942 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
943 tb_page_remove(&p->first_tb, tb);
944 invalidate_page_bitmap(p);
945 }
946
5e5f07e0 947 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
948
949 /* remove the TB from the hash list */
950 h = tb_jmp_cache_hash_func(tb->pc);
bdc44640 951 CPU_FOREACH(cpu) {
952 if (cpu->tb_jmp_cache[h] == tb) {
953 cpu->tb_jmp_cache[h] = NULL;
954 }
955 }
956
957 /* suppress this TB from the two jump lists */
958 tb_jmp_remove(tb, 0);
959 tb_jmp_remove(tb, 1);
960
961 /* suppress any remaining jumps to this TB */
962 tb1 = tb->jmp_first;
963 for (;;) {
964 n1 = (uintptr_t)tb1 & 3;
965 if (n1 == 2) {
966 break;
967 }
968 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
969 tb2 = tb1->jmp_next[n1];
970 tb_reset_jump(tb1, n1);
971 tb1->jmp_next[n1] = NULL;
972 tb1 = tb2;
973 }
974 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
975
5e5f07e0 976 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
977}
978
979static inline void set_bits(uint8_t *tab, int start, int len)
980{
981 int end, mask, end1;
982
983 end = start + len;
984 tab += start >> 3;
985 mask = 0xff << (start & 7);
986 if ((start & ~7) == (end & ~7)) {
987 if (start < end) {
988 mask &= ~(0xff << (end & 7));
989 *tab |= mask;
990 }
991 } else {
992 *tab++ |= mask;
993 start = (start + 8) & ~7;
994 end1 = end & ~7;
995 while (start < end1) {
996 *tab++ = 0xff;
997 start += 8;
998 }
999 if (start < end) {
1000 mask = ~(0xff << (end & 7));
1001 *tab |= mask;
1002 }
1003 }
1004}
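
/* Worked example: set_bits(tab, 5, 7) marks bits 5..11, i.e.
     tab[0] |= 0xe0;    bits 5, 6 and 7 of the first byte
     tab[1] |= 0x0f;    bits 0..3 of the second byte
   build_page_bitmap() below uses this to record which bytes of a guest
   page are covered by translated code. */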
1005
1006static void build_page_bitmap(PageDesc *p)
1007{
1008 int n, tb_start, tb_end;
1009 TranslationBlock *tb;
1010
1011 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
1012
1013 tb = p->first_tb;
1014 while (tb != NULL) {
1015 n = (uintptr_t)tb & 3;
1016 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1017 /* NOTE: this is subtle as a TB may span two physical pages */
1018 if (n == 0) {
1019 /* NOTE: tb_end may be after the end of the page, but
1020 it is not a problem */
1021 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1022 tb_end = tb_start + tb->size;
1023 if (tb_end > TARGET_PAGE_SIZE) {
1024 tb_end = TARGET_PAGE_SIZE;
1025 }
1026 } else {
1027 tb_start = 0;
1028 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1029 }
1030 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1031 tb = tb->page_next[n];
1032 }
1033}
1034
648f034c 1035TranslationBlock *tb_gen_code(CPUState *cpu,
1036 target_ulong pc, target_ulong cs_base,
1037 int flags, int cflags)
1038{
648f034c 1039 CPUArchState *env = cpu->env_ptr;
5b6dd868 1040 TranslationBlock *tb;
1041 tb_page_addr_t phys_pc, phys_page2;
1042 target_ulong virt_page2;
1043 int code_gen_size;
1044
1045 phys_pc = get_page_addr_code(env, pc);
1046 tb = tb_alloc(pc);
1047 if (!tb) {
1048 /* flush must be done */
1049 tb_flush(env);
1050 /* cannot fail at this point */
1051 tb = tb_alloc(pc);
1052 /* Don't forget to invalidate previous TB info. */
5e5f07e0 1053 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
5b6dd868 1054 }
1813e175 1055 tb->tc_ptr = tcg_ctx.code_gen_ptr;
1056 tb->cs_base = cs_base;
1057 tb->flags = flags;
1058 tb->cflags = cflags;
1059 cpu_gen_code(env, tb, &code_gen_size);
1060 tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
1061 code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1062
1063 /* check next page if needed */
1064 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1065 phys_page2 = -1;
1066 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1067 phys_page2 = get_page_addr_code(env, virt_page2);
1068 }
1069 tb_link_page(tb, phys_pc, phys_page2);
1070 return tb;
1071}
1072
1073/*
1074 * Invalidate all TBs which intersect with the target physical address range
1075 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1076 * 'is_cpu_write_access' should be true if called from a real cpu write
1077 * access: the virtual CPU will exit the current TB if code is modified inside
1078 * this TB.
1079 */
1080void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1081 int is_cpu_write_access)
1082{
1083 while (start < end) {
1084 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1085 start &= TARGET_PAGE_MASK;
1086 start += TARGET_PAGE_SIZE;
1087 }
1088}
1089
1090/*
1091 * Invalidate all TBs which intersect with the target physical address range
1092 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1093 * 'is_cpu_write_access' should be true if called from a real cpu write
1094 * access: the virtual CPU will exit the current TB if code is modified inside
1095 * this TB.
1096 */
1097void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1098 int is_cpu_write_access)
1099{
1100 TranslationBlock *tb, *tb_next, *saved_tb;
4917cf44 1101 CPUState *cpu = current_cpu;
baea4fae 1102#if defined(TARGET_HAS_PRECISE_SMC)
1103 CPUArchState *env = NULL;
1104#endif
1105 tb_page_addr_t tb_start, tb_end;
1106 PageDesc *p;
1107 int n;
1108#ifdef TARGET_HAS_PRECISE_SMC
1109 int current_tb_not_found = is_cpu_write_access;
1110 TranslationBlock *current_tb = NULL;
1111 int current_tb_modified = 0;
1112 target_ulong current_pc = 0;
1113 target_ulong current_cs_base = 0;
1114 int current_flags = 0;
1115#endif /* TARGET_HAS_PRECISE_SMC */
1116
1117 p = page_find(start >> TARGET_PAGE_BITS);
1118 if (!p) {
1119 return;
1120 }
1121 if (!p->code_bitmap &&
1122 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1123 is_cpu_write_access) {
1124 /* build code bitmap */
1125 build_page_bitmap(p);
1126 }
baea4fae 1127#if defined(TARGET_HAS_PRECISE_SMC)
1128 if (cpu != NULL) {
1129 env = cpu->env_ptr;
d77953b9 1130 }
4917cf44 1131#endif
1132
1133 /* we remove all the TBs in the range [start, end[ */
1134 /* XXX: see if in some cases it could be faster to invalidate all
1135 the code */
1136 tb = p->first_tb;
1137 while (tb != NULL) {
1138 n = (uintptr_t)tb & 3;
1139 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1140 tb_next = tb->page_next[n];
1141 /* NOTE: this is subtle as a TB may span two physical pages */
1142 if (n == 0) {
1143 /* NOTE: tb_end may be after the end of the page, but
1144 it is not a problem */
1145 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1146 tb_end = tb_start + tb->size;
1147 } else {
1148 tb_start = tb->page_addr[1];
1149 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1150 }
1151 if (!(tb_end <= start || tb_start >= end)) {
1152#ifdef TARGET_HAS_PRECISE_SMC
1153 if (current_tb_not_found) {
1154 current_tb_not_found = 0;
1155 current_tb = NULL;
93afeade 1156 if (cpu->mem_io_pc) {
5b6dd868 1157 /* now we have a real cpu fault */
93afeade 1158 current_tb = tb_find_pc(cpu->mem_io_pc);
1159 }
1160 }
1161 if (current_tb == tb &&
1162 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1163 /* If we are modifying the current TB, we must stop
1164 its execution. We could be more precise by checking
1165 that the modification is after the current PC, but it
1166 would require a specialized function to partially
1167 restore the CPU state */
1168
1169 current_tb_modified = 1;
74f10515 1170 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1171 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1172 &current_flags);
1173 }
1174#endif /* TARGET_HAS_PRECISE_SMC */
1175 /* we need to do that to handle the case where a signal
1176 occurs while doing tb_phys_invalidate() */
1177 saved_tb = NULL;
1178 if (cpu != NULL) {
1179 saved_tb = cpu->current_tb;
1180 cpu->current_tb = NULL;
1181 }
1182 tb_phys_invalidate(tb, -1);
1183 if (cpu != NULL) {
1184 cpu->current_tb = saved_tb;
1185 if (cpu->interrupt_request && cpu->current_tb) {
1186 cpu_interrupt(cpu, cpu->interrupt_request);
1187 }
1188 }
1189 }
1190 tb = tb_next;
1191 }
1192#if !defined(CONFIG_USER_ONLY)
1193 /* if no code remaining, no need to continue to use slow writes */
1194 if (!p->first_tb) {
1195 invalidate_page_bitmap(p);
1196 if (is_cpu_write_access) {
baea4fae 1197 tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
1198 }
1199 }
1200#endif
1201#ifdef TARGET_HAS_PRECISE_SMC
1202 if (current_tb_modified) {
1203 /* we generate a block containing just the instruction
1204 modifying the memory. It will ensure that it cannot modify
1205 itself */
d77953b9 1206 cpu->current_tb = NULL;
648f034c 1207 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
0ea8cb88 1208 cpu_resume_from_signal(cpu, NULL);
1209 }
1210#endif
1211}
1212
1213/* len must be <= 8 and start must be a multiple of len */
1214void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1215{
1216 PageDesc *p;
1217 int offset, b;
1218
1219#if 0
1220 if (1) {
1221 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1222 cpu_single_env->mem_io_vaddr, len,
1223 cpu_single_env->eip,
1224 cpu_single_env->eip +
1225 (intptr_t)cpu_single_env->segs[R_CS].base);
1226 }
1227#endif
1228 p = page_find(start >> TARGET_PAGE_BITS);
1229 if (!p) {
1230 return;
1231 }
1232 if (p->code_bitmap) {
1233 offset = start & ~TARGET_PAGE_MASK;
1234 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1235 if (b & ((1 << len) - 1)) {
1236 goto do_invalidate;
1237 }
1238 } else {
1239 do_invalidate:
1240 tb_invalidate_phys_page_range(start, start + len, 1);
1241 }
1242}
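
/* Example of the bitmap test above: for a 4-byte write at page offset 0x23,
   offset >> 3 selects byte 4 of code_bitmap, the shift by (offset & 7) == 3
   brings bit 35 down to position 0, and the mask (1 << 4) - 1 then tests
   bits 35..38, exactly the bytes touched by the write. */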
1243
1244#if !defined(CONFIG_SOFTMMU)
1245static void tb_invalidate_phys_page(tb_page_addr_t addr,
1246 uintptr_t pc, void *puc,
1247 bool locked)
1248{
1249 TranslationBlock *tb;
1250 PageDesc *p;
1251 int n;
1252#ifdef TARGET_HAS_PRECISE_SMC
1253 TranslationBlock *current_tb = NULL;
1254 CPUState *cpu = current_cpu;
1255 CPUArchState *env = NULL;
1256 int current_tb_modified = 0;
1257 target_ulong current_pc = 0;
1258 target_ulong current_cs_base = 0;
1259 int current_flags = 0;
1260#endif
1261
1262 addr &= TARGET_PAGE_MASK;
1263 p = page_find(addr >> TARGET_PAGE_BITS);
1264 if (!p) {
1265 return;
1266 }
1267 tb = p->first_tb;
1268#ifdef TARGET_HAS_PRECISE_SMC
1269 if (tb && pc != 0) {
1270 current_tb = tb_find_pc(pc);
1271 }
1272 if (cpu != NULL) {
1273 env = cpu->env_ptr;
d77953b9 1274 }
1275#endif
1276 while (tb != NULL) {
1277 n = (uintptr_t)tb & 3;
1278 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1279#ifdef TARGET_HAS_PRECISE_SMC
1280 if (current_tb == tb &&
1281 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1282 /* If we are modifying the current TB, we must stop
1283 its execution. We could be more precise by checking
1284 that the modification is after the current PC, but it
1285 would require a specialized function to partially
1286 restore the CPU state */
1287
1288 current_tb_modified = 1;
74f10515 1289 cpu_restore_state_from_tb(cpu, current_tb, pc);
1290 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1291 &current_flags);
1292 }
1293#endif /* TARGET_HAS_PRECISE_SMC */
1294 tb_phys_invalidate(tb, addr);
1295 tb = tb->page_next[n];
1296 }
1297 p->first_tb = NULL;
1298#ifdef TARGET_HAS_PRECISE_SMC
1299 if (current_tb_modified) {
1300 /* we generate a block containing just the instruction
1301 modifying the memory. It will ensure that it cannot modify
1302 itself */
d77953b9 1303 cpu->current_tb = NULL;
648f034c 1304 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1305 if (locked) {
1306 mmap_unlock();
1307 }
0ea8cb88 1308 cpu_resume_from_signal(cpu, puc);
1309 }
1310#endif
1311}
1312#endif
1313
1314/* add the tb in the target page and protect it if necessary */
1315static inline void tb_alloc_page(TranslationBlock *tb,
1316 unsigned int n, tb_page_addr_t page_addr)
1317{
1318 PageDesc *p;
1319#ifndef CONFIG_USER_ONLY
1320 bool page_already_protected;
1321#endif
1322
1323 tb->page_addr[n] = page_addr;
1324 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1325 tb->page_next[n] = p->first_tb;
1326#ifndef CONFIG_USER_ONLY
1327 page_already_protected = p->first_tb != NULL;
1328#endif
1329 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1330 invalidate_page_bitmap(p);
1331
1332#if defined(TARGET_HAS_SMC) || 1
1333
1334#if defined(CONFIG_USER_ONLY)
1335 if (p->flags & PAGE_WRITE) {
1336 target_ulong addr;
1337 PageDesc *p2;
1338 int prot;
1339
1340 /* force the host page as non writable (writes will have a
1341 page fault + mprotect overhead) */
1342 page_addr &= qemu_host_page_mask;
1343 prot = 0;
1344 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1345 addr += TARGET_PAGE_SIZE) {
1346
1347 p2 = page_find(addr >> TARGET_PAGE_BITS);
1348 if (!p2) {
1349 continue;
1350 }
1351 prot |= p2->flags;
1352 p2->flags &= ~PAGE_WRITE;
1353 }
1354 mprotect(g2h(page_addr), qemu_host_page_size,
1355 (prot & PAGE_BITS) & ~PAGE_WRITE);
1356#ifdef DEBUG_TB_INVALIDATE
1357 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1358 page_addr);
1359#endif
1360 }
1361#else
1362 /* if some code is already present, then the pages are already
1363 protected. So we handle the case where only the first TB is
1364 allocated in a physical page */
1365 if (!page_already_protected) {
1366 tlb_protect_code(page_addr);
1367 }
1368#endif
1369
1370#endif /* TARGET_HAS_SMC */
1371}
1372
1373/* add a new TB and link it to the physical page tables. phys_page2 is
1374 (-1) to indicate that only one page contains the TB. */
1375static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1376 tb_page_addr_t phys_page2)
1377{
1378 unsigned int h;
1379 TranslationBlock **ptb;
1380
1381 /* Grab the mmap lock to stop another thread invalidating this TB
1382 before we are done. */
1383 mmap_lock();
1384 /* add in the physical hash table */
1385 h = tb_phys_hash_func(phys_pc);
5e5f07e0 1386 ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1387 tb->phys_hash_next = *ptb;
1388 *ptb = tb;
1389
1390 /* add in the page list */
1391 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1392 if (phys_page2 != -1) {
1393 tb_alloc_page(tb, 1, phys_page2);
1394 } else {
1395 tb->page_addr[1] = -1;
1396 }
1397
1398 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1399 tb->jmp_next[0] = NULL;
1400 tb->jmp_next[1] = NULL;
1401
1402 /* init original jump addresses */
1403 if (tb->tb_next_offset[0] != 0xffff) {
1404 tb_reset_jump(tb, 0);
1405 }
1406 if (tb->tb_next_offset[1] != 0xffff) {
1407 tb_reset_jump(tb, 1);
1408 }
1409
1410#ifdef DEBUG_TB_CHECK
1411 tb_page_check();
1412#endif
1413 mmap_unlock();
1414}
1415
1416/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1417 tb[1].tc_ptr. Return NULL if not found */
a8a826a3 1418static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1419{
1420 int m_min, m_max, m;
1421 uintptr_t v;
1422 TranslationBlock *tb;
1423
5e5f07e0 1424 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
5b6dd868
BS
1425 return NULL;
1426 }
1427 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1428 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1429 return NULL;
1430 }
1431 /* binary search (cf Knuth) */
1432 m_min = 0;
5e5f07e0 1433 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1434 while (m_min <= m_max) {
1435 m = (m_min + m_max) >> 1;
5e5f07e0 1436 tb = &tcg_ctx.tb_ctx.tbs[m];
1437 v = (uintptr_t)tb->tc_ptr;
1438 if (v == tc_ptr) {
1439 return tb;
1440 } else if (tc_ptr < v) {
1441 m_max = m - 1;
1442 } else {
1443 m_min = m + 1;
1444 }
1445 }
5e5f07e0 1446 return &tcg_ctx.tb_ctx.tbs[m_max];
1447}
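
/* The binary search above relies on tbs[] being ordered by tc_ptr.  That
   holds because TBs are carved out of code_gen_buffer sequentially by
   tb_alloc(), and the array is only ever reset wholesale by tb_flush(). */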
1448
5b6dd868 1449#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
29d8ec7b 1450void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1451{
1452 ram_addr_t ram_addr;
5c8a00ce 1453 MemoryRegion *mr;
149f54b5 1454 hwaddr l = 1;
5b6dd868 1455
29d8ec7b 1456 mr = address_space_translate(as, addr, &addr, &l, false);
1457 if (!(memory_region_is_ram(mr)
1458 || memory_region_is_romd(mr))) {
1459 return;
1460 }
5c8a00ce 1461 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
149f54b5 1462 + addr;
1463 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1464}
1465#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1466
239c51a5 1467void tb_check_watchpoint(CPUState *cpu)
1468{
1469 TranslationBlock *tb;
1470
93afeade 1471 tb = tb_find_pc(cpu->mem_io_pc);
5b6dd868 1472 if (!tb) {
a47dddd7 1473 cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
93afeade 1474 (void *)cpu->mem_io_pc);
5b6dd868 1475 }
74f10515 1476 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1477 tb_phys_invalidate(tb, -1);
1478}
1479
1480#ifndef CONFIG_USER_ONLY
1481/* mask must never be zero, except for A20 change call */
c3affe56 1482static void tcg_handle_interrupt(CPUState *cpu, int mask)
5b6dd868 1483{
1484 int old_mask;
1485
1486 old_mask = cpu->interrupt_request;
1487 cpu->interrupt_request |= mask;
1488
1489 /*
1490 * If called from iothread context, wake the target cpu in
     * case it's halted.
1492 */
1493 if (!qemu_cpu_is_self(cpu)) {
1494 qemu_cpu_kick(cpu);
1495 return;
1496 }
1497
1498 if (use_icount) {
28ecfd7a 1499 cpu->icount_decr.u16.high = 0xffff;
99df7dce 1500 if (!cpu_can_do_io(cpu)
5b6dd868 1501 && (mask & ~old_mask) != 0) {
a47dddd7 1502 cpu_abort(cpu, "Raised interrupt while not in I/O function");
1503 }
1504 } else {
378df4b2 1505 cpu->tcg_exit_req = 1;
1506 }
1507}
1508
1509CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1510
1511/* in deterministic execution mode, instructions doing device I/Os
1512 must be at the end of the TB */
90b40a69 1513void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
5b6dd868 1514{
a47dddd7 1515#if defined(TARGET_MIPS) || defined(TARGET_SH4)
90b40a69 1516 CPUArchState *env = cpu->env_ptr;
a47dddd7 1517#endif
1518 TranslationBlock *tb;
1519 uint32_t n, cflags;
1520 target_ulong pc, cs_base;
1521 uint64_t flags;
1522
1523 tb = tb_find_pc(retaddr);
1524 if (!tb) {
a47dddd7 1525 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1526 (void *)retaddr);
1527 }
28ecfd7a 1528 n = cpu->icount_decr.u16.low + tb->icount;
74f10515 1529 cpu_restore_state_from_tb(cpu, tb, retaddr);
1530 /* Calculate how many instructions had been executed before the fault
1531 occurred. */
28ecfd7a 1532 n = n - cpu->icount_decr.u16.low;
1533 /* Generate a new TB ending on the I/O insn. */
1534 n++;
1535 /* On MIPS and SH, delay slot instructions can only be restarted if
1536 they were already the first instruction in the TB. If this is not
1537 the first instruction in a TB then re-execute the preceding
1538 branch. */
1539#if defined(TARGET_MIPS)
1540 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1541 env->active_tc.PC -= 4;
28ecfd7a 1542 cpu->icount_decr.u16.low++;
1543 env->hflags &= ~MIPS_HFLAG_BMASK;
1544 }
1545#elif defined(TARGET_SH4)
1546 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1547 && n > 1) {
1548 env->pc -= 2;
28ecfd7a 1549 cpu->icount_decr.u16.low++;
1550 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1551 }
1552#endif
1553 /* This should never happen. */
1554 if (n > CF_COUNT_MASK) {
a47dddd7 1555 cpu_abort(cpu, "TB too big during recompile");
1556 }
1557
1558 cflags = n | CF_LAST_IO;
1559 pc = tb->pc;
1560 cs_base = tb->cs_base;
1561 flags = tb->flags;
1562 tb_phys_invalidate(tb, -1);
1563 /* FIXME: In theory this could raise an exception. In practice
1564 we have already translated the block once so it's probably ok. */
648f034c 1565 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1566 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1567 the first in the TB) then we end up generating a whole new TB and
1568 repeating the fault, which is horribly inefficient.
1569 Better would be to execute just this insn uncached, or generate a
1570 second new TB. */
0ea8cb88 1571 cpu_resume_from_signal(cpu, NULL);
1572}
1573
611d4f99 1574void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1575{
1576 unsigned int i;
1577
1578 /* Discard jump cache entries for any tb which might potentially
1579 overlap the flushed page. */
1580 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
8cd70437 1581 memset(&cpu->tb_jmp_cache[i], 0,
1582 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1583
1584 i = tb_jmp_cache_hash_page(addr);
8cd70437 1585 memset(&cpu->tb_jmp_cache[i], 0,
1586 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1587}
1588
1589void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1590{
1591 int i, target_code_size, max_target_code_size;
1592 int direct_jmp_count, direct_jmp2_count, cross_page;
1593 TranslationBlock *tb;
1594
1595 target_code_size = 0;
1596 max_target_code_size = 0;
1597 cross_page = 0;
1598 direct_jmp_count = 0;
1599 direct_jmp2_count = 0;
1600 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1601 tb = &tcg_ctx.tb_ctx.tbs[i];
1602 target_code_size += tb->size;
1603 if (tb->size > max_target_code_size) {
1604 max_target_code_size = tb->size;
1605 }
1606 if (tb->page_addr[1] != -1) {
1607 cross_page++;
1608 }
1609 if (tb->tb_next_offset[0] != 0xffff) {
1610 direct_jmp_count++;
1611 if (tb->tb_next_offset[1] != 0xffff) {
1612 direct_jmp2_count++;
1613 }
1614 }
1615 }
1616 /* XXX: avoid using doubles ? */
1617 cpu_fprintf(f, "Translation buffer state:\n");
1618 cpu_fprintf(f, "gen code size %td/%zd\n",
1619 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1620 tcg_ctx.code_gen_buffer_max_size);
5b6dd868 1621 cpu_fprintf(f, "TB count %d/%d\n",
5e5f07e0 1622 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
5b6dd868 1623 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1624 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1625 tcg_ctx.tb_ctx.nb_tbs : 0,
1626 max_target_code_size);
5b6dd868 1627 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1628 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1629 tcg_ctx.code_gen_buffer) /
1630 tcg_ctx.tb_ctx.nb_tbs : 0,
1631 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1632 tcg_ctx.code_gen_buffer) /
1633 target_code_size : 0);
1634 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1635 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1636 tcg_ctx.tb_ctx.nb_tbs : 0);
1637 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1638 direct_jmp_count,
1639 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1640 tcg_ctx.tb_ctx.nb_tbs : 0,
5b6dd868 1641 direct_jmp2_count,
1642 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1643 tcg_ctx.tb_ctx.nb_tbs : 0);
5b6dd868 1644 cpu_fprintf(f, "\nStatistics:\n");
1645 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1646 cpu_fprintf(f, "TB invalidate count %d\n",
1647 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1648 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1649 tcg_dump_info(f, cpu_fprintf);
1650}
1651
1652#else /* CONFIG_USER_ONLY */
1653
c3affe56 1654void cpu_interrupt(CPUState *cpu, int mask)
5b6dd868 1655{
259186a7 1656 cpu->interrupt_request |= mask;
378df4b2 1657 cpu->tcg_exit_req = 1;
1658}
1659
1660/*
1661 * Walks guest process memory "regions" one by one
1662 * and calls callback function 'fn' for each region.
1663 */
1664struct walk_memory_regions_data {
1665 walk_memory_regions_fn fn;
1666 void *priv;
1667 uintptr_t start;
1668 int prot;
1669};
1670
1671static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1672 abi_ulong end, int new_prot)
1673{
1674 if (data->start != -1ul) {
1675 int rc = data->fn(data->priv, data->start, end, data->prot);
1676 if (rc != 0) {
1677 return rc;
1678 }
1679 }
1680
1681 data->start = (new_prot ? end : -1ul);
1682 data->prot = new_prot;
1683
1684 return 0;
1685}
1686
1687static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1688 abi_ulong base, int level, void **lp)
1689{
1690 abi_ulong pa;
1691 int i, rc;
1692
1693 if (*lp == NULL) {
1694 return walk_memory_regions_end(data, base, 0);
1695 }
1696
1697 if (level == 0) {
1698 PageDesc *pd = *lp;
1699
03f49957 1700 for (i = 0; i < V_L2_SIZE; ++i) {
1701 int prot = pd[i].flags;
1702
1703 pa = base | (i << TARGET_PAGE_BITS);
1704 if (prot != data->prot) {
1705 rc = walk_memory_regions_end(data, pa, prot);
1706 if (rc != 0) {
1707 return rc;
1708 }
1709 }
1710 }
1711 } else {
1712 void **pp = *lp;
1713
03f49957 1714 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868 1715 pa = base | ((abi_ulong)i <<
03f49957 1716 (TARGET_PAGE_BITS + V_L2_BITS * level));
1717 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1718 if (rc != 0) {
1719 return rc;
1720 }
1721 }
1722 }
1723
1724 return 0;
1725}
1726
1727int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1728{
1729 struct walk_memory_regions_data data;
1730 uintptr_t i;
1731
1732 data.fn = fn;
1733 data.priv = priv;
1734 data.start = -1ul;
1735 data.prot = 0;
1736
1737 for (i = 0; i < V_L1_SIZE; i++) {
1738 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
03f49957 1739 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1740
1741 if (rc != 0) {
1742 return rc;
1743 }
1744 }
1745
1746 return walk_memory_regions_end(&data, 0, 0);
1747}
1748
1749static int dump_region(void *priv, abi_ulong start,
1750 abi_ulong end, unsigned long prot)
1751{
1752 FILE *f = (FILE *)priv;
1753
1754 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
1755 " "TARGET_ABI_FMT_lx" %c%c%c\n",
1756 start, end, end - start,
1757 ((prot & PAGE_READ) ? 'r' : '-'),
1758 ((prot & PAGE_WRITE) ? 'w' : '-'),
1759 ((prot & PAGE_EXEC) ? 'x' : '-'));
1760
1761 return 0;
1762}
1763
1764/* dump memory mappings */
1765void page_dump(FILE *f)
1766{
1767 const int length = sizeof(abi_ulong) * 2;
1768 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1769 length, "start", length, "end", length, "size", "prot");
1770 walk_memory_regions(f, dump_region);
1771}
1772
1773int page_get_flags(target_ulong address)
1774{
1775 PageDesc *p;
1776
1777 p = page_find(address >> TARGET_PAGE_BITS);
1778 if (!p) {
1779 return 0;
1780 }
1781 return p->flags;
1782}
1783
1784/* Modify the flags of a page and invalidate the code if necessary.
1785 The flag PAGE_WRITE_ORG is positioned automatically depending
1786 on PAGE_WRITE. The mmap_lock should already be held. */
1787void page_set_flags(target_ulong start, target_ulong end, int flags)
1788{
1789 target_ulong addr, len;
1790
1791 /* This function should never be called with addresses outside the
1792 guest address space. If this assert fires, it probably indicates
1793 a missing call to h2g_valid. */
1794#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1795 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1796#endif
1797 assert(start < end);
1798
1799 start = start & TARGET_PAGE_MASK;
1800 end = TARGET_PAGE_ALIGN(end);
1801
1802 if (flags & PAGE_WRITE) {
1803 flags |= PAGE_WRITE_ORG;
1804 }
1805
1806 for (addr = start, len = end - start;
1807 len != 0;
1808 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1809 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1810
1811 /* If the write protection bit is set, then we invalidate
1812 the code inside. */
1813 if (!(p->flags & PAGE_WRITE) &&
1814 (flags & PAGE_WRITE) &&
1815 p->first_tb) {
d02532f0 1816 tb_invalidate_phys_page(addr, 0, NULL, false);
1817 }
1818 p->flags = flags;
1819 }
1820}
1821
1822int page_check_range(target_ulong start, target_ulong len, int flags)
1823{
1824 PageDesc *p;
1825 target_ulong end;
1826 target_ulong addr;
1827
1828 /* This function should never be called with addresses outside the
1829 guest address space. If this assert fires, it probably indicates
1830 a missing call to h2g_valid. */
1831#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1832 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1833#endif
1834
1835 if (len == 0) {
1836 return 0;
1837 }
1838 if (start + len - 1 < start) {
1839 /* We've wrapped around. */
1840 return -1;
1841 }
1842
    /* must do this before we lose bits in the next step */
1844 end = TARGET_PAGE_ALIGN(start + len);
1845 start = start & TARGET_PAGE_MASK;
1846
1847 for (addr = start, len = end - start;
1848 len != 0;
1849 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1850 p = page_find(addr >> TARGET_PAGE_BITS);
1851 if (!p) {
1852 return -1;
1853 }
1854 if (!(p->flags & PAGE_VALID)) {
1855 return -1;
1856 }
1857
1858 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1859 return -1;
1860 }
1861 if (flags & PAGE_WRITE) {
1862 if (!(p->flags & PAGE_WRITE_ORG)) {
1863 return -1;
1864 }
1865 /* unprotect the page if it was put read-only because it
1866 contains translated code */
1867 if (!(p->flags & PAGE_WRITE)) {
1868 if (!page_unprotect(addr, 0, NULL)) {
1869 return -1;
1870 }
1871 }
1872 }
1873 }
1874 return 0;
1875}
1876
1877/* called from signal handler: invalidate the code and unprotect the
1878 page. Return TRUE if the fault was successfully handled. */
1879int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1880{
1881 unsigned int prot;
1882 PageDesc *p;
1883 target_ulong host_start, host_end, addr;
1884
1885 /* Technically this isn't safe inside a signal handler. However we
1886 know this only ever happens in a synchronous SEGV handler, so in
1887 practice it seems to be ok. */
1888 mmap_lock();
1889
1890 p = page_find(address >> TARGET_PAGE_BITS);
1891 if (!p) {
1892 mmap_unlock();
1893 return 0;
1894 }
1895
1896 /* if the page was really writable, then we change its
1897 protection back to writable */
1898 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1899 host_start = address & qemu_host_page_mask;
1900 host_end = host_start + qemu_host_page_size;
1901
1902 prot = 0;
1903 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1904 p = page_find(addr >> TARGET_PAGE_BITS);
1905 p->flags |= PAGE_WRITE;
1906 prot |= p->flags;
1907
1908 /* and since the content will be modified, we must invalidate
1909 the corresponding translated code. */
d02532f0 1910 tb_invalidate_phys_page(addr, pc, puc, true);
1911#ifdef DEBUG_TB_CHECK
1912 tb_invalidate_check(addr);
1913#endif
1914 }
1915 mprotect((void *)g2h(host_start), qemu_host_page_size,
1916 prot & PAGE_BITS);
1917
1918 mmap_unlock();
1919 return 1;
1920 }
1921 mmap_unlock();
1922 return 0;
1923}
1924#endif /* CONFIG_USER_ONLY */