translate-all.c
1/*
2 * Host code generation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#ifdef _WIN32
20#include <windows.h>
21#else
22#include <sys/types.h>
23#include <sys/mman.h>
24#endif
25#include <stdarg.h>
26#include <stdlib.h>
27#include <stdio.h>
28#include <string.h>
29#include <inttypes.h>
30
31#include "config.h"
32
33#include "qemu-common.h"
34#define NO_CPU_IO_DEFS
35#include "cpu.h"
36#include "trace.h"
37#include "disas/disas.h"
38#include "tcg.h"
39#if defined(CONFIG_USER_ONLY)
40#include "qemu.h"
41#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
42#include <sys/param.h>
43#if __FreeBSD_version >= 700104
44#define HAVE_KINFO_GETVMMAP
45#define sigqueue sigqueue_freebsd /* avoid redefinition */
46#include <sys/time.h>
47#include <sys/proc.h>
48#include <machine/profile.h>
49#define _KERNEL
50#include <sys/user.h>
51#undef _KERNEL
52#undef sigqueue
53#include <libutil.h>
54#endif
55#endif
56#else
57#include "exec/address-spaces.h"
58#endif
59
60#include "exec/cputlb.h"
61#include "translate-all.h"
62#include "qemu/timer.h"
63
64//#define DEBUG_TB_INVALIDATE
65//#define DEBUG_FLUSH
66/* make various TB consistency checks */
67//#define DEBUG_TB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
76typedef struct PageDesc {
77 /* list of TBs intersecting this ram page */
78 TranslationBlock *first_tb;
79 /* in order to optimize self modifying code, we count the number
80 of lookups we do to a given page to use a bitmap */
81 unsigned int code_write_count;
82 uint8_t *code_bitmap;
83#if defined(CONFIG_USER_ONLY)
84 unsigned long flags;
85#endif
86} PageDesc;
87
88/* In system mode we want L1_MAP to be based on ram offsets,
89 while in user mode we want it to be based on virtual addresses. */
90#if !defined(CONFIG_USER_ONLY)
91#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
92# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
93#else
94# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
95#endif
96#else
97# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
98#endif
99
100/* Size of the L2 (and L3, etc) page tables. */
101#define V_L2_BITS 10
102#define V_L2_SIZE (1 << V_L2_BITS)
103
104/* The bits remaining after N lower levels of page tables. */
105#define V_L1_BITS_REM \
106 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
107
108#if V_L1_BITS_REM < 4
109#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
110#else
111#define V_L1_BITS V_L1_BITS_REM
112#endif
113
114#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
115
116#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
117
118uintptr_t qemu_real_host_page_size;
119uintptr_t qemu_host_page_size;
120uintptr_t qemu_host_page_mask;
121
122/* This is a multi-level map on the virtual address space.
123 The bottom level has pointers to PageDesc. */
124static void *l1_map[V_L1_SIZE];
125
126/* code generation context */
127TCGContext tcg_ctx;
128
129static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
130 tb_page_addr_t phys_page2);
131static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
132
133void cpu_gen_init(void)
134{
135 tcg_context_init(&tcg_ctx);
136}
137
138/* return non zero if the very first instruction is invalid so that
139 the virtual CPU can trigger an exception.
140
141 '*gen_code_size_ptr' contains the size of the generated code (host
142 code).
143*/
144int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
145{
146 TCGContext *s = &tcg_ctx;
147 tcg_insn_unit *gen_code_buf;
148 int gen_code_size;
149#ifdef CONFIG_PROFILER
150 int64_t ti;
151#endif
152
153#ifdef CONFIG_PROFILER
154 s->tb_count1++; /* includes aborted translations because of
155 exceptions */
156 ti = profile_getclock();
157#endif
158 tcg_func_start(s);
159
160 gen_intermediate_code(env, tb);
161
162 trace_translate_block(tb, tb->pc, tb->tc_ptr);
163
164 /* generate machine code */
165 gen_code_buf = tb->tc_ptr;
166 tb->tb_next_offset[0] = 0xffff;
167 tb->tb_next_offset[1] = 0xffff;
168 s->tb_next_offset = tb->tb_next_offset;
169#ifdef USE_DIRECT_JUMP
170 s->tb_jmp_offset = tb->tb_jmp_offset;
171 s->tb_next = NULL;
172#else
173 s->tb_jmp_offset = NULL;
174 s->tb_next = tb->tb_next;
175#endif
176
177#ifdef CONFIG_PROFILER
178 s->tb_count++;
179 s->interm_time += profile_getclock() - ti;
180 s->code_time -= profile_getclock();
181#endif
182 gen_code_size = tcg_gen_code(s, gen_code_buf);
183 *gen_code_size_ptr = gen_code_size;
184#ifdef CONFIG_PROFILER
185 s->code_time += profile_getclock();
186 s->code_in_len += tb->size;
187 s->code_out_len += gen_code_size;
188#endif
189
190#ifdef DEBUG_DISAS
191 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
192 qemu_log("OUT: [size=%d]\n", gen_code_size);
193 log_disas(tb->tc_ptr, gen_code_size);
194 qemu_log("\n");
195 qemu_log_flush();
196 }
197#endif
198 return 0;
199}
200
201/* The cpu state corresponding to 'searched_pc' is restored.
202 */
203static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
204 uintptr_t searched_pc)
205{
206 CPUArchState *env = cpu->env_ptr;
207 TCGContext *s = &tcg_ctx;
208 int j;
209 uintptr_t tc_ptr;
210#ifdef CONFIG_PROFILER
211 int64_t ti;
212#endif
213
214#ifdef CONFIG_PROFILER
215 ti = profile_getclock();
216#endif
217 tcg_func_start(s);
218
219 gen_intermediate_code_pc(env, tb);
220
221 if (use_icount) {
222 /* Reset the cycle counter to the start of the block. */
223 cpu->icount_decr.u16.low += tb->icount;
224 /* Clear the IO flag. */
225 cpu->can_do_io = 0;
226 }
227
d19893da 228 /* find opc index corresponding to search_pc */
229 tc_ptr = (uintptr_t)tb->tc_ptr;
230 if (searched_pc < tc_ptr)
231 return -1;
232
233 s->tb_next_offset = tb->tb_next_offset;
234#ifdef USE_DIRECT_JUMP
235 s->tb_jmp_offset = tb->tb_jmp_offset;
236 s->tb_next = NULL;
237#else
238 s->tb_jmp_offset = NULL;
239 s->tb_next = tb->tb_next;
240#endif
241 j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
242 searched_pc - tc_ptr);
243 if (j < 0)
244 return -1;
d19893da 245 /* now find start of instruction before */
246 while (s->gen_opc_instr_start[j] == 0) {
247 j--;
248 }
249 cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
250
251 restore_state_to_opc(env, tb, j);
252
253#ifdef CONFIG_PROFILER
254 s->restore_time += profile_getclock() - ti;
255 s->restore_count++;
256#endif
257 return 0;
258}
259
260bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
261{
262 TranslationBlock *tb;
263
264 tb = tb_find_pc(retaddr);
265 if (tb) {
266 cpu_restore_state_from_tb(cpu, tb, retaddr);
267 return true;
268 }
269 return false;
270}
271
272#ifdef _WIN32
273static inline void map_exec(void *addr, long size)
274{
275 DWORD old_protect;
276 VirtualProtect(addr, size,
277 PAGE_EXECUTE_READWRITE, &old_protect);
278}
279#else
280static inline void map_exec(void *addr, long size)
281{
282 unsigned long start, end, page_size;
283
284 page_size = getpagesize();
285 start = (unsigned long)addr;
286 start &= ~(page_size - 1);
287
288 end = (unsigned long)addr + size;
289 end += page_size - 1;
290 end &= ~(page_size - 1);
291
292 mprotect((void *)start, end - start,
293 PROT_READ | PROT_WRITE | PROT_EXEC);
294}
295#endif
296
297void page_size_init(void)
298{
299 /* NOTE: we can always suppose that qemu_host_page_size >=
300 TARGET_PAGE_SIZE */
301 qemu_real_host_page_size = getpagesize();
302 if (qemu_host_page_size == 0) {
303 qemu_host_page_size = qemu_real_host_page_size;
304 }
305 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
306 qemu_host_page_size = TARGET_PAGE_SIZE;
307 }
308 qemu_host_page_mask = ~(qemu_host_page_size - 1);
309}
310
311static void page_init(void)
312{
313 page_size_init();
314#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
315 {
316#ifdef HAVE_KINFO_GETVMMAP
317 struct kinfo_vmentry *freep;
318 int i, cnt;
319
320 freep = kinfo_getvmmap(getpid(), &cnt);
321 if (freep) {
322 mmap_lock();
323 for (i = 0; i < cnt; i++) {
324 unsigned long startaddr, endaddr;
325
326 startaddr = freep[i].kve_start;
327 endaddr = freep[i].kve_end;
328 if (h2g_valid(startaddr)) {
329 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
330
331 if (h2g_valid(endaddr)) {
332 endaddr = h2g(endaddr);
333 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
334 } else {
335#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
336 endaddr = ~0ul;
337 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
338#endif
339 }
340 }
341 }
342 free(freep);
343 mmap_unlock();
344 }
345#else
346 FILE *f;
347
348 last_brk = (unsigned long)sbrk(0);
349
350 f = fopen("/compat/linux/proc/self/maps", "r");
351 if (f) {
352 mmap_lock();
353
354 do {
355 unsigned long startaddr, endaddr;
356 int n;
357
358 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
359
360 if (n == 2 && h2g_valid(startaddr)) {
361 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
362
363 if (h2g_valid(endaddr)) {
364 endaddr = h2g(endaddr);
365 } else {
366 endaddr = ~0ul;
367 }
368 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
369 }
370 } while (!feof(f));
371
372 fclose(f);
373 mmap_unlock();
374 }
375#endif
376 }
377#endif
378}
379
380static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
381{
382 PageDesc *pd;
383 void **lp;
384 int i;
385
386#if defined(CONFIG_USER_ONLY)
387 /* We can't use g_malloc because it may recurse into a locked mutex. */
388# define ALLOC(P, SIZE) \
389 do { \
390 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
391 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
392 } while (0)
393#else
394# define ALLOC(P, SIZE) \
395 do { P = g_malloc0(SIZE); } while (0)
396#endif
397
398 /* Level 1. Always allocated. */
399 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
400
401 /* Level 2..N-1. */
402 for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
403 void **p = *lp;
404
405 if (p == NULL) {
406 if (!alloc) {
407 return NULL;
408 }
409 ALLOC(p, sizeof(void *) * V_L2_SIZE);
410 *lp = p;
411 }
412
413 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
414 }
415
416 pd = *lp;
417 if (pd == NULL) {
418 if (!alloc) {
419 return NULL;
420 }
421 ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
422 *lp = pd;
423 }
424
425#undef ALLOC
426
427 return pd + (index & (V_L2_SIZE - 1));
428}
429
430static inline PageDesc *page_find(tb_page_addr_t index)
431{
432 return page_find_alloc(index, 0);
433}
434
435#if !defined(CONFIG_USER_ONLY)
436#define mmap_lock() do { } while (0)
437#define mmap_unlock() do { } while (0)
438#endif
439
440#if defined(CONFIG_USER_ONLY)
441/* Currently it is not recommended to allocate big chunks of data in
442 user mode. This will change once a dedicated libc is used. */
443/* ??? 64-bit hosts ought to have no problem mmaping data outside the
444 region in which the guest needs to run. Revisit this. */
445#define USE_STATIC_CODE_GEN_BUFFER
446#endif
447
448/* ??? Should configure for this, not list operating systems here. */
449#if (defined(__linux__) \
450 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
451 || defined(__DragonFly__) || defined(__OpenBSD__) \
452 || defined(__NetBSD__))
453# define USE_MMAP
454#endif
455
456/* Minimum size of the code gen buffer. This number is randomly chosen,
457 but not so small that we can't have a fair number of TB's live. */
458#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
459
460/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
461 indicated, this is constrained by the range of direct branches on the
462 host cpu, as used by the TCG implementation of goto_tb. */
463#if defined(__x86_64__)
464# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
465#elif defined(__sparc__)
466# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
467#elif defined(__aarch64__)
468# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
469#elif defined(__arm__)
470# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
471#elif defined(__s390x__)
472 /* We have a +- 4GB range on the branches; leave some slop. */
473# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
474#elif defined(__mips__)
475 /* We have a 256MB branch region, but leave room to make sure the
476 main executable is also within that region. */
477# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
478#else
479# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
480#endif
481
482#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
483
484#define DEFAULT_CODE_GEN_BUFFER_SIZE \
485 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
486 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
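/* For instance (illustrative): on an x86_64 host MAX_CODE_GEN_BUFFER_SIZE is
 * 2 GiB, so the default stays at the 32 MiB of
 * DEFAULT_CODE_GEN_BUFFER_SIZE_1; on a 32-bit ARM host the 16 MiB maximum
 * above clamps the default down to 16 MiB. */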
487
488static inline size_t size_code_gen_buffer(size_t tb_size)
489{
490 /* Size the buffer. */
491 if (tb_size == 0) {
492#ifdef USE_STATIC_CODE_GEN_BUFFER
493 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
494#else
495 /* ??? Needs adjustments. */
496 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
497 static buffer, we could size this on RESERVED_VA, on the text
498 segment size of the executable, or continue to use the default. */
499 tb_size = (unsigned long)(ram_size / 4);
500#endif
501 }
502 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
503 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
504 }
505 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
506 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
507 }
508 tcg_ctx.code_gen_buffer_size = tb_size;
509 return tb_size;
510}
511
512#ifdef __mips__
513/* In order to use J and JAL within the code_gen_buffer, we require
514 that the buffer not cross a 256MB boundary. */
515static inline bool cross_256mb(void *addr, size_t size)
516{
517 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
518}
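/* Example (hypothetical values): addr = 0x0fff0000 and size = 0x20000 give
 * addr ^ (addr + size) = 0x0fff0000 ^ 0x10010000 = 0x1ffe0000; that value
 * still has bits set in 0xf0000000, so the range straddles a 256MB boundary
 * and cross_256mb() returns true. */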
519
520/* We weren't able to allocate a buffer without crossing that boundary,
521 so make do with the larger portion of the buffer that doesn't cross.
522 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
523static inline void *split_cross_256mb(void *buf1, size_t size1)
524{
525 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
526 size_t size2 = buf1 + size1 - buf2;
527
528 size1 = buf2 - buf1;
529 if (size1 < size2) {
530 size1 = size2;
531 buf1 = buf2;
532 }
533
534 tcg_ctx.code_gen_buffer_size = size1;
535 return buf1;
536}
537#endif
538
539#ifdef USE_STATIC_CODE_GEN_BUFFER
540static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
541 __attribute__((aligned(CODE_GEN_ALIGN)));
542
543static inline void *alloc_code_gen_buffer(void)
544{
545 void *buf = static_code_gen_buffer;
546#ifdef __mips__
547 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
548 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
549 }
550#endif
551 map_exec(buf, tcg_ctx.code_gen_buffer_size);
552 return buf;
553}
554#elif defined(USE_MMAP)
555static inline void *alloc_code_gen_buffer(void)
556{
557 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
558 uintptr_t start = 0;
559 void *buf;
560
561 /* Constrain the position of the buffer based on the host cpu.
562 Note that these addresses are chosen in concert with the
563 addresses assigned in the relevant linker script file. */
564# if defined(__PIE__) || defined(__PIC__)
565 /* Don't bother setting a preferred location if we're building
566 a position-independent executable. We're more likely to get
567 an address near the main executable if we let the kernel
568 choose the address. */
569# elif defined(__x86_64__) && defined(MAP_32BIT)
570 /* Force the memory down into low memory with the executable.
571 Leave the choice of exact location with the kernel. */
572 flags |= MAP_32BIT;
573 /* Cannot expect to map more than 800MB in low memory. */
574 if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
575 tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
576 }
577# elif defined(__sparc__)
578 start = 0x40000000ul;
579# elif defined(__s390x__)
580 start = 0x90000000ul;
581# elif defined(__mips__)
582 /* ??? We ought to more explicitly manage layout for softmmu too. */
583# ifdef CONFIG_USER_ONLY
584 start = 0x68000000ul;
585# elif _MIPS_SIM == _ABI64
586 start = 0x128000000ul;
587# else
588 start = 0x08000000ul;
589# endif
590# endif
591
592 buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
593 PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
594 if (buf == MAP_FAILED) {
595 return NULL;
596 }
597
598#ifdef __mips__
599 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
600 /* Try again, with the original still mapped, to avoid re-acquiring
601 that 256mb crossing. This time don't specify an address. */
602 size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
603 void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
604 flags, -1, 0);
605 if (buf2 != MAP_FAILED) {
606 if (!cross_256mb(buf2, size1)) {
607 /* Success! Use the new buffer. */
608 munmap(buf, size1);
609 return buf2;
610 }
611 /* Failure. Work with what we had. */
612 munmap(buf2, size1);
613 }
614
615 /* Split the original buffer. Free the smaller half. */
616 buf2 = split_cross_256mb(buf, size1);
617 size2 = tcg_ctx.code_gen_buffer_size;
618 munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
619 return buf2;
620 }
621#endif
622
623 return buf;
624}
625#else
626static inline void *alloc_code_gen_buffer(void)
627{
628 void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
629
630 if (buf == NULL) {
631 return NULL;
632 }
633
634#ifdef __mips__
635 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
636 void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
637 if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
638 /* Success! Use the new buffer. */
639 free(buf);
640 buf = buf2;
641 } else {
642 /* Failure. Work with what we had. Since this is malloc
643 and not mmap, we can't free the other half. */
644 free(buf2);
645 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
646 }
647 }
648#endif
649
650 map_exec(buf, tcg_ctx.code_gen_buffer_size);
651 return buf;
652}
653#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
654
655static inline void code_gen_alloc(size_t tb_size)
656{
657 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
658 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
659 if (tcg_ctx.code_gen_buffer == NULL) {
660 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
661 exit(1);
662 }
663
664 qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
665 QEMU_MADV_HUGEPAGE);
666
667 /* Steal room for the prologue at the end of the buffer. This ensures
668 (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
669 from TB's to the prologue are going to be in range. It also means
670 that we don't need to mark (additional) portions of the data segment
671 as executable. */
672 tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
673 tcg_ctx.code_gen_buffer_size - 1024;
674 tcg_ctx.code_gen_buffer_size -= 1024;
675
676 tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
677 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
678 tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
679 CODE_GEN_AVG_BLOCK_SIZE;
680 tcg_ctx.tb_ctx.tbs =
681 g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
682}
683
684/* Must be called before using the QEMU cpus. 'tb_size' is the size
685 (in bytes) allocated to the translation buffer. Zero means default
686 size. */
687void tcg_exec_init(unsigned long tb_size)
688{
689 cpu_gen_init();
690 code_gen_alloc(tb_size);
691 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
692 tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
693 page_init();
694#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
695 /* There's no guest base to take into account, so go ahead and
696 initialize the prologue now. */
697 tcg_prologue_init(&tcg_ctx);
698#endif
699}
700
701bool tcg_enabled(void)
702{
703 return tcg_ctx.code_gen_buffer != NULL;
704}
705
706/* Allocate a new translation block. Flush the translation buffer if
707 too many translation blocks or too much generated code. */
708static TranslationBlock *tb_alloc(target_ulong pc)
709{
710 TranslationBlock *tb;
711
712 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
713 (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
714 tcg_ctx.code_gen_buffer_max_size) {
715 return NULL;
716 }
717 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
718 tb->pc = pc;
719 tb->cflags = 0;
720 return tb;
721}
722
723void tb_free(TranslationBlock *tb)
724{
725 /* In practice this is mostly used for single-use temporary TBs.
726 Ignore the hard cases and just back up if this TB happens to
727 be the last one generated. */
728 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
729 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
730 tcg_ctx.code_gen_ptr = tb->tc_ptr;
731 tcg_ctx.tb_ctx.nb_tbs--;
732 }
733}
734
735static inline void invalidate_page_bitmap(PageDesc *p)
736{
737 if (p->code_bitmap) {
738 g_free(p->code_bitmap);
739 p->code_bitmap = NULL;
740 }
741 p->code_write_count = 0;
742}
743
744/* Set to NULL all the 'first_tb' fields in all PageDescs. */
745static void page_flush_tb_1(int level, void **lp)
746{
747 int i;
748
749 if (*lp == NULL) {
750 return;
751 }
752 if (level == 0) {
753 PageDesc *pd = *lp;
754
755 for (i = 0; i < V_L2_SIZE; ++i) {
756 pd[i].first_tb = NULL;
757 invalidate_page_bitmap(pd + i);
758 }
759 } else {
760 void **pp = *lp;
761
762 for (i = 0; i < V_L2_SIZE; ++i) {
763 page_flush_tb_1(level - 1, pp + i);
764 }
765 }
766}
767
768static void page_flush_tb(void)
769{
770 int i;
771
772 for (i = 0; i < V_L1_SIZE; i++) {
773 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
774 }
775}
776
777/* flush all the translation blocks */
778/* XXX: tb_flush is currently not thread safe */
779void tb_flush(CPUArchState *env1)
780{
781 CPUState *cpu = ENV_GET_CPU(env1);
782
783#if defined(DEBUG_FLUSH)
784 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
785 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
786 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
787 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
788 tcg_ctx.tb_ctx.nb_tbs : 0);
789#endif
790 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
791 > tcg_ctx.code_gen_buffer_size) {
792 cpu_abort(cpu, "Internal error: code buffer overflow\n");
793 }
794 tcg_ctx.tb_ctx.nb_tbs = 0;
795
796 CPU_FOREACH(cpu) {
797 memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
798 }
799
800 memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
801 page_flush_tb();
802
803 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
804 /* XXX: flush processor icache at this point if cache flush is
805 expensive */
806 tcg_ctx.tb_ctx.tb_flush_count++;
807}
808
809#ifdef DEBUG_TB_CHECK
810
811static void tb_invalidate_check(target_ulong address)
812{
813 TranslationBlock *tb;
814 int i;
815
816 address &= TARGET_PAGE_MASK;
817 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
818 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
819 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
820 address >= tb->pc + tb->size)) {
821 printf("ERROR invalidate: address=" TARGET_FMT_lx
822 " PC=%08lx size=%04x\n",
823 address, (long)tb->pc, tb->size);
824 }
825 }
826 }
827}
828
829/* verify that all the pages have correct rights for code */
830static void tb_page_check(void)
831{
832 TranslationBlock *tb;
833 int i, flags1, flags2;
834
835 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
836 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
837 tb = tb->phys_hash_next) {
838 flags1 = page_get_flags(tb->pc);
839 flags2 = page_get_flags(tb->pc + tb->size - 1);
840 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
841 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
842 (long)tb->pc, tb->size, flags1, flags2);
843 }
844 }
845 }
846}
847
848#endif
849
850static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
851{
852 TranslationBlock *tb1;
853
854 for (;;) {
855 tb1 = *ptb;
856 if (tb1 == tb) {
857 *ptb = tb1->phys_hash_next;
858 break;
859 }
860 ptb = &tb1->phys_hash_next;
861 }
862}
863
864static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
865{
866 TranslationBlock *tb1;
867 unsigned int n1;
868
869 for (;;) {
870 tb1 = *ptb;
871 n1 = (uintptr_t)tb1 & 3;
872 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
873 if (tb1 == tb) {
874 *ptb = tb1->page_next[n1];
875 break;
876 }
877 ptb = &tb1->page_next[n1];
878 }
879}
880
881static inline void tb_jmp_remove(TranslationBlock *tb, int n)
882{
883 TranslationBlock *tb1, **ptb;
884 unsigned int n1;
885
886 ptb = &tb->jmp_next[n];
887 tb1 = *ptb;
888 if (tb1) {
889 /* find tb(n) in circular list */
890 for (;;) {
891 tb1 = *ptb;
892 n1 = (uintptr_t)tb1 & 3;
893 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
894 if (n1 == n && tb1 == tb) {
895 break;
896 }
897 if (n1 == 2) {
898 ptb = &tb1->jmp_first;
899 } else {
900 ptb = &tb1->jmp_next[n1];
901 }
902 }
903 /* now we can suppress tb(n) from the list */
904 *ptb = tb->jmp_next[n];
905
906 tb->jmp_next[n] = NULL;
907 }
908}
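/* Note on the tagging used above: the low two bits of the pointers stored in
 * jmp_first/jmp_next encode which jump slot of the pointed-to TB the link
 * belongs to (0 or 1), while the value 2 marks the owning TB at the head of
 * the circular list (see tb_link_page(), which sets jmp_first = tb | 2). */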
909
910/* reset the jump entry 'n' of a TB so that it is not chained to
911 another TB */
912static inline void tb_reset_jump(TranslationBlock *tb, int n)
913{
914 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
915}
916
917/* invalidate one TB */
918void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
919{
920 CPUState *cpu;
921 PageDesc *p;
922 unsigned int h, n1;
923 tb_page_addr_t phys_pc;
924 TranslationBlock *tb1, *tb2;
925
926 /* remove the TB from the hash list */
927 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
928 h = tb_phys_hash_func(phys_pc);
929 tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
930
931 /* remove the TB from the page list */
932 if (tb->page_addr[0] != page_addr) {
933 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
934 tb_page_remove(&p->first_tb, tb);
935 invalidate_page_bitmap(p);
936 }
937 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
938 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
939 tb_page_remove(&p->first_tb, tb);
940 invalidate_page_bitmap(p);
941 }
942
943 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
944
945 /* remove the TB from the hash list */
946 h = tb_jmp_cache_hash_func(tb->pc);
947 CPU_FOREACH(cpu) {
948 if (cpu->tb_jmp_cache[h] == tb) {
949 cpu->tb_jmp_cache[h] = NULL;
950 }
951 }
952
953 /* suppress this TB from the two jump lists */
954 tb_jmp_remove(tb, 0);
955 tb_jmp_remove(tb, 1);
956
957 /* suppress any remaining jumps to this TB */
958 tb1 = tb->jmp_first;
959 for (;;) {
960 n1 = (uintptr_t)tb1 & 3;
961 if (n1 == 2) {
962 break;
963 }
964 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
965 tb2 = tb1->jmp_next[n1];
966 tb_reset_jump(tb1, n1);
967 tb1->jmp_next[n1] = NULL;
968 tb1 = tb2;
969 }
970 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
971
972 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
973}
974
975static inline void set_bits(uint8_t *tab, int start, int len)
976{
977 int end, mask, end1;
978
979 end = start + len;
980 tab += start >> 3;
981 mask = 0xff << (start & 7);
982 if ((start & ~7) == (end & ~7)) {
983 if (start < end) {
984 mask &= ~(0xff << (end & 7));
985 *tab |= mask;
986 }
987 } else {
988 *tab++ |= mask;
989 start = (start + 8) & ~7;
990 end1 = end & ~7;
991 while (start < end1) {
992 *tab++ = 0xff;
993 start += 8;
994 }
995 if (start < end) {
996 mask = ~(0xff << (end & 7));
997 *tab |= mask;
998 }
999 }
1000}
1001
1002static void build_page_bitmap(PageDesc *p)
1003{
1004 int n, tb_start, tb_end;
1005 TranslationBlock *tb;
1006
1007 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
1008
1009 tb = p->first_tb;
1010 while (tb != NULL) {
1011 n = (uintptr_t)tb & 3;
1012 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1013 /* NOTE: this is subtle as a TB may span two physical pages */
1014 if (n == 0) {
1015 /* NOTE: tb_end may be after the end of the page, but
1016 it is not a problem */
1017 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1018 tb_end = tb_start + tb->size;
1019 if (tb_end > TARGET_PAGE_SIZE) {
1020 tb_end = TARGET_PAGE_SIZE;
1021 }
1022 } else {
1023 tb_start = 0;
1024 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1025 }
1026 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1027 tb = tb->page_next[n];
1028 }
1029}
1030
1031TranslationBlock *tb_gen_code(CPUState *cpu,
1032 target_ulong pc, target_ulong cs_base,
1033 int flags, int cflags)
1034{
1035 CPUArchState *env = cpu->env_ptr;
1036 TranslationBlock *tb;
1037 tb_page_addr_t phys_pc, phys_page2;
1038 target_ulong virt_page2;
1039 int code_gen_size;
1040
1041 phys_pc = get_page_addr_code(env, pc);
1042 tb = tb_alloc(pc);
1043 if (!tb) {
1044 /* flush must be done */
1045 tb_flush(env);
1046 /* cannot fail at this point */
1047 tb = tb_alloc(pc);
1048 /* Don't forget to invalidate previous TB info. */
1049 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
1050 }
1051 tb->tc_ptr = tcg_ctx.code_gen_ptr;
1052 tb->cs_base = cs_base;
1053 tb->flags = flags;
1054 tb->cflags = cflags;
1055 cpu_gen_code(env, tb, &code_gen_size);
1056 tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
1057 code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1058
1059 /* check next page if needed */
1060 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1061 phys_page2 = -1;
1062 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1063 phys_page2 = get_page_addr_code(env, virt_page2);
1064 }
1065 tb_link_page(tb, phys_pc, phys_page2);
1066 return tb;
1067}
1068
1069/*
1070 * Invalidate all TBs which intersect with the target physical address range
1071 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1072 * 'is_cpu_write_access' should be true if called from a real cpu write
1073 * access: the virtual CPU will exit the current TB if code is modified inside
1074 * this TB.
1075 */
1076void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1077 int is_cpu_write_access)
1078{
1079 while (start < end) {
1080 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1081 start &= TARGET_PAGE_MASK;
1082 start += TARGET_PAGE_SIZE;
1083 }
1084}
1085
1086/*
1087 * Invalidate all TBs which intersect with the target physical address range
1088 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1089 * 'is_cpu_write_access' should be true if called from a real cpu write
1090 * access: the virtual CPU will exit the current TB if code is modified inside
1091 * this TB.
1092 */
1093void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1094 int is_cpu_write_access)
1095{
1096 TranslationBlock *tb, *tb_next, *saved_tb;
1097 CPUState *cpu = current_cpu;
1098#if defined(TARGET_HAS_PRECISE_SMC)
1099 CPUArchState *env = NULL;
1100#endif
1101 tb_page_addr_t tb_start, tb_end;
1102 PageDesc *p;
1103 int n;
1104#ifdef TARGET_HAS_PRECISE_SMC
1105 int current_tb_not_found = is_cpu_write_access;
1106 TranslationBlock *current_tb = NULL;
1107 int current_tb_modified = 0;
1108 target_ulong current_pc = 0;
1109 target_ulong current_cs_base = 0;
1110 int current_flags = 0;
1111#endif /* TARGET_HAS_PRECISE_SMC */
1112
1113 p = page_find(start >> TARGET_PAGE_BITS);
1114 if (!p) {
1115 return;
1116 }
1117 if (!p->code_bitmap &&
1118 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1119 is_cpu_write_access) {
1120 /* build code bitmap */
1121 build_page_bitmap(p);
1122 }
1123#if defined(TARGET_HAS_PRECISE_SMC)
1124 if (cpu != NULL) {
1125 env = cpu->env_ptr;
1126 }
1127#endif
1128
1129 /* we remove all the TBs in the range [start, end[ */
1130 /* XXX: see if in some cases it could be faster to invalidate all
1131 the code */
1132 tb = p->first_tb;
1133 while (tb != NULL) {
1134 n = (uintptr_t)tb & 3;
1135 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1136 tb_next = tb->page_next[n];
1137 /* NOTE: this is subtle as a TB may span two physical pages */
1138 if (n == 0) {
1139 /* NOTE: tb_end may be after the end of the page, but
1140 it is not a problem */
1141 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1142 tb_end = tb_start + tb->size;
1143 } else {
1144 tb_start = tb->page_addr[1];
1145 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1146 }
1147 if (!(tb_end <= start || tb_start >= end)) {
1148#ifdef TARGET_HAS_PRECISE_SMC
1149 if (current_tb_not_found) {
1150 current_tb_not_found = 0;
1151 current_tb = NULL;
1152 if (cpu->mem_io_pc) {
1153 /* now we have a real cpu fault */
1154 current_tb = tb_find_pc(cpu->mem_io_pc);
1155 }
1156 }
1157 if (current_tb == tb &&
1158 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1159 /* If we are modifying the current TB, we must stop
1160 its execution. We could be more precise by checking
1161 that the modification is after the current PC, but it
1162 would require a specialized function to partially
1163 restore the CPU state */
1164
1165 current_tb_modified = 1;
1166 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1167 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1168 &current_flags);
1169 }
1170#endif /* TARGET_HAS_PRECISE_SMC */
1171 /* we need to do that to handle the case where a signal
1172 occurs while doing tb_phys_invalidate() */
1173 saved_tb = NULL;
1174 if (cpu != NULL) {
1175 saved_tb = cpu->current_tb;
1176 cpu->current_tb = NULL;
1177 }
1178 tb_phys_invalidate(tb, -1);
1179 if (cpu != NULL) {
1180 cpu->current_tb = saved_tb;
1181 if (cpu->interrupt_request && cpu->current_tb) {
1182 cpu_interrupt(cpu, cpu->interrupt_request);
1183 }
1184 }
1185 }
1186 tb = tb_next;
1187 }
1188#if !defined(CONFIG_USER_ONLY)
1189 /* if no code remaining, no need to continue to use slow writes */
1190 if (!p->first_tb) {
1191 invalidate_page_bitmap(p);
1192 if (is_cpu_write_access) {
1193 tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
1194 }
1195 }
1196#endif
1197#ifdef TARGET_HAS_PRECISE_SMC
1198 if (current_tb_modified) {
1199 /* we generate a block containing just the instruction
1200 modifying the memory. It will ensure that it cannot modify
1201 itself */
1202 cpu->current_tb = NULL;
1203 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1204 cpu_resume_from_signal(cpu, NULL);
1205 }
1206#endif
1207}
1208
1209/* len must be <= 8 and start must be a multiple of len */
1210void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1211{
1212 PageDesc *p;
1213 int offset, b;
1214
1215#if 0
1216 if (1) {
1217 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1218 cpu_single_env->mem_io_vaddr, len,
1219 cpu_single_env->eip,
1220 cpu_single_env->eip +
1221 (intptr_t)cpu_single_env->segs[R_CS].base);
1222 }
1223#endif
1224 p = page_find(start >> TARGET_PAGE_BITS);
1225 if (!p) {
1226 return;
1227 }
1228 if (p->code_bitmap) {
1229 offset = start & ~TARGET_PAGE_MASK;
1230 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1231 if (b & ((1 << len) - 1)) {
1232 goto do_invalidate;
1233 }
1234 } else {
1235 do_invalidate:
1236 tb_invalidate_phys_page_range(start, start + len, 1);
1237 }
1238}
1239
1240#if !defined(CONFIG_SOFTMMU)
1241static void tb_invalidate_phys_page(tb_page_addr_t addr,
1242 uintptr_t pc, void *puc,
1243 bool locked)
1244{
1245 TranslationBlock *tb;
1246 PageDesc *p;
1247 int n;
1248#ifdef TARGET_HAS_PRECISE_SMC
1249 TranslationBlock *current_tb = NULL;
1250 CPUState *cpu = current_cpu;
1251 CPUArchState *env = NULL;
1252 int current_tb_modified = 0;
1253 target_ulong current_pc = 0;
1254 target_ulong current_cs_base = 0;
1255 int current_flags = 0;
1256#endif
1257
1258 addr &= TARGET_PAGE_MASK;
1259 p = page_find(addr >> TARGET_PAGE_BITS);
1260 if (!p) {
1261 return;
1262 }
1263 tb = p->first_tb;
1264#ifdef TARGET_HAS_PRECISE_SMC
1265 if (tb && pc != 0) {
1266 current_tb = tb_find_pc(pc);
1267 }
1268 if (cpu != NULL) {
1269 env = cpu->env_ptr;
1270 }
1271#endif
1272 while (tb != NULL) {
1273 n = (uintptr_t)tb & 3;
1274 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1275#ifdef TARGET_HAS_PRECISE_SMC
1276 if (current_tb == tb &&
1277 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1278 /* If we are modifying the current TB, we must stop
1279 its execution. We could be more precise by checking
1280 that the modification is after the current PC, but it
1281 would require a specialized function to partially
1282 restore the CPU state */
1283
1284 current_tb_modified = 1;
1285 cpu_restore_state_from_tb(cpu, current_tb, pc);
1286 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1287 &current_flags);
1288 }
1289#endif /* TARGET_HAS_PRECISE_SMC */
1290 tb_phys_invalidate(tb, addr);
1291 tb = tb->page_next[n];
1292 }
1293 p->first_tb = NULL;
1294#ifdef TARGET_HAS_PRECISE_SMC
1295 if (current_tb_modified) {
1296 /* we generate a block containing just the instruction
1297 modifying the memory. It will ensure that it cannot modify
1298 itself */
1299 cpu->current_tb = NULL;
1300 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1301 if (locked) {
1302 mmap_unlock();
1303 }
1304 cpu_resume_from_signal(cpu, puc);
1305 }
1306#endif
1307}
1308#endif
1309
1310/* add the tb in the target page and protect it if necessary */
1311static inline void tb_alloc_page(TranslationBlock *tb,
1312 unsigned int n, tb_page_addr_t page_addr)
1313{
1314 PageDesc *p;
1315#ifndef CONFIG_USER_ONLY
1316 bool page_already_protected;
1317#endif
1318
1319 tb->page_addr[n] = page_addr;
1320 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1321 tb->page_next[n] = p->first_tb;
1322#ifndef CONFIG_USER_ONLY
1323 page_already_protected = p->first_tb != NULL;
1324#endif
1325 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1326 invalidate_page_bitmap(p);
1327
1328#if defined(TARGET_HAS_SMC) || 1
1329
1330#if defined(CONFIG_USER_ONLY)
1331 if (p->flags & PAGE_WRITE) {
1332 target_ulong addr;
1333 PageDesc *p2;
1334 int prot;
1335
1336 /* force the host page as non writable (writes will have a
1337 page fault + mprotect overhead) */
1338 page_addr &= qemu_host_page_mask;
1339 prot = 0;
1340 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1341 addr += TARGET_PAGE_SIZE) {
1342
1343 p2 = page_find(addr >> TARGET_PAGE_BITS);
1344 if (!p2) {
1345 continue;
1346 }
1347 prot |= p2->flags;
1348 p2->flags &= ~PAGE_WRITE;
1349 }
1350 mprotect(g2h(page_addr), qemu_host_page_size,
1351 (prot & PAGE_BITS) & ~PAGE_WRITE);
1352#ifdef DEBUG_TB_INVALIDATE
1353 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1354 page_addr);
1355#endif
1356 }
1357#else
1358 /* if some code is already present, then the pages are already
1359 protected. So we handle the case where only the first TB is
1360 allocated in a physical page */
1361 if (!page_already_protected) {
1362 tlb_protect_code(page_addr);
1363 }
1364#endif
1365
1366#endif /* TARGET_HAS_SMC */
1367}
1368
1369/* add a new TB and link it to the physical page tables. phys_page2 is
1370 (-1) to indicate that only one page contains the TB. */
1371static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1372 tb_page_addr_t phys_page2)
1373{
1374 unsigned int h;
1375 TranslationBlock **ptb;
1376
1377 /* Grab the mmap lock to stop another thread invalidating this TB
1378 before we are done. */
1379 mmap_lock();
1380 /* add in the physical hash table */
1381 h = tb_phys_hash_func(phys_pc);
1382 ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1383 tb->phys_hash_next = *ptb;
1384 *ptb = tb;
1385
1386 /* add in the page list */
1387 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1388 if (phys_page2 != -1) {
1389 tb_alloc_page(tb, 1, phys_page2);
1390 } else {
1391 tb->page_addr[1] = -1;
1392 }
1393
1394 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1395 tb->jmp_next[0] = NULL;
1396 tb->jmp_next[1] = NULL;
1397
1398 /* init original jump addresses */
1399 if (tb->tb_next_offset[0] != 0xffff) {
1400 tb_reset_jump(tb, 0);
1401 }
1402 if (tb->tb_next_offset[1] != 0xffff) {
1403 tb_reset_jump(tb, 1);
1404 }
1405
1406#ifdef DEBUG_TB_CHECK
1407 tb_page_check();
1408#endif
1409 mmap_unlock();
1410}
1411
1412/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1413 tb[1].tc_ptr. Return NULL if not found */
1414static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1415{
1416 int m_min, m_max, m;
1417 uintptr_t v;
1418 TranslationBlock *tb;
1419
1420 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1421 return NULL;
1422 }
1423 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1424 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1425 return NULL;
1426 }
1427 /* binary search (cf Knuth) */
1428 m_min = 0;
1429 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1430 while (m_min <= m_max) {
1431 m = (m_min + m_max) >> 1;
1432 tb = &tcg_ctx.tb_ctx.tbs[m];
1433 v = (uintptr_t)tb->tc_ptr;
1434 if (v == tc_ptr) {
1435 return tb;
1436 } else if (tc_ptr < v) {
1437 m_max = m - 1;
1438 } else {
1439 m_min = m + 1;
1440 }
1441 }
1442 return &tcg_ctx.tb_ctx.tbs[m_max];
1443}
1444
1445#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1446void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1447{
1448 ram_addr_t ram_addr;
1449 MemoryRegion *mr;
1450 hwaddr l = 1;
1451
1452 mr = address_space_translate(as, addr, &addr, &l, false);
1453 if (!(memory_region_is_ram(mr)
1454 || memory_region_is_romd(mr))) {
1455 return;
1456 }
1457 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1458 + addr;
1459 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1460}
1461#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1462
1463void tb_check_watchpoint(CPUState *cpu)
1464{
1465 TranslationBlock *tb;
1466
1467 tb = tb_find_pc(cpu->mem_io_pc);
1468 if (!tb) {
1469 cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
1470 (void *)cpu->mem_io_pc);
1471 }
1472 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1473 tb_phys_invalidate(tb, -1);
1474}
1475
1476#ifndef CONFIG_USER_ONLY
1477/* mask must never be zero, except for A20 change call */
1478static void tcg_handle_interrupt(CPUState *cpu, int mask)
1479{
1480 int old_mask;
1481
1482 old_mask = cpu->interrupt_request;
1483 cpu->interrupt_request |= mask;
1484
1485 /*
1486 * If called from iothread context, wake the target cpu in
1487 * case its halted.
1488 */
1489 if (!qemu_cpu_is_self(cpu)) {
1490 qemu_cpu_kick(cpu);
1491 return;
1492 }
1493
1494 if (use_icount) {
1495 cpu->icount_decr.u16.high = 0xffff;
1496 if (!cpu_can_do_io(cpu)
1497 && (mask & ~old_mask) != 0) {
1498 cpu_abort(cpu, "Raised interrupt while not in I/O function");
1499 }
1500 } else {
1501 cpu->tcg_exit_req = 1;
1502 }
1503}
1504
1505CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1506
1507/* in deterministic execution mode, instructions doing device I/Os
1508 must be at the end of the TB */
1509void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1510{
1511#if defined(TARGET_MIPS) || defined(TARGET_SH4)
1512 CPUArchState *env = cpu->env_ptr;
1513#endif
1514 TranslationBlock *tb;
1515 uint32_t n, cflags;
1516 target_ulong pc, cs_base;
1517 uint64_t flags;
1518
1519 tb = tb_find_pc(retaddr);
1520 if (!tb) {
1521 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1522 (void *)retaddr);
1523 }
1524 n = cpu->icount_decr.u16.low + tb->icount;
1525 cpu_restore_state_from_tb(cpu, tb, retaddr);
1526 /* Calculate how many instructions had been executed before the fault
1527 occurred. */
1528 n = n - cpu->icount_decr.u16.low;
1529 /* Generate a new TB ending on the I/O insn. */
1530 n++;
1531 /* On MIPS and SH, delay slot instructions can only be restarted if
1532 they were already the first instruction in the TB. If this is not
1533 the first instruction in a TB then re-execute the preceding
1534 branch. */
1535#if defined(TARGET_MIPS)
1536 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1537 env->active_tc.PC -= 4;
1538 cpu->icount_decr.u16.low++;
1539 env->hflags &= ~MIPS_HFLAG_BMASK;
1540 }
1541#elif defined(TARGET_SH4)
1542 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1543 && n > 1) {
1544 env->pc -= 2;
1545 cpu->icount_decr.u16.low++;
1546 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1547 }
1548#endif
1549 /* This should never happen. */
1550 if (n > CF_COUNT_MASK) {
1551 cpu_abort(cpu, "TB too big during recompile");
1552 }
1553
1554 cflags = n | CF_LAST_IO;
1555 pc = tb->pc;
1556 cs_base = tb->cs_base;
1557 flags = tb->flags;
1558 tb_phys_invalidate(tb, -1);
1559 /* FIXME: In theory this could raise an exception. In practice
1560 we have already translated the block once so it's probably ok. */
1561 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1562 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1563 the first in the TB) then we end up generating a whole new TB and
1564 repeating the fault, which is horribly inefficient.
1565 Better would be to execute just this insn uncached, or generate a
1566 second new TB. */
1567 cpu_resume_from_signal(cpu, NULL);
1568}
1569
1570void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1571{
1572 unsigned int i;
1573
1574 /* Discard jump cache entries for any tb which might potentially
1575 overlap the flushed page. */
1576 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1577 memset(&cpu->tb_jmp_cache[i], 0,
1578 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1579
1580 i = tb_jmp_cache_hash_page(addr);
1581 memset(&cpu->tb_jmp_cache[i], 0,
1582 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1583}
1584
1585void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1586{
1587 int i, target_code_size, max_target_code_size;
1588 int direct_jmp_count, direct_jmp2_count, cross_page;
1589 TranslationBlock *tb;
1590
1591 target_code_size = 0;
1592 max_target_code_size = 0;
1593 cross_page = 0;
1594 direct_jmp_count = 0;
1595 direct_jmp2_count = 0;
1596 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1597 tb = &tcg_ctx.tb_ctx.tbs[i];
1598 target_code_size += tb->size;
1599 if (tb->size > max_target_code_size) {
1600 max_target_code_size = tb->size;
1601 }
1602 if (tb->page_addr[1] != -1) {
1603 cross_page++;
1604 }
1605 if (tb->tb_next_offset[0] != 0xffff) {
1606 direct_jmp_count++;
1607 if (tb->tb_next_offset[1] != 0xffff) {
1608 direct_jmp2_count++;
1609 }
1610 }
1611 }
1612 /* XXX: avoid using doubles ? */
1613 cpu_fprintf(f, "Translation buffer state:\n");
1614 cpu_fprintf(f, "gen code size %td/%zd\n",
1615 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1616 tcg_ctx.code_gen_buffer_max_size);
1617 cpu_fprintf(f, "TB count %d/%d\n",
1618 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1619 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1620 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1621 tcg_ctx.tb_ctx.nb_tbs : 0,
1622 max_target_code_size);
1623 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1624 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1625 tcg_ctx.code_gen_buffer) /
1626 tcg_ctx.tb_ctx.nb_tbs : 0,
1627 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1628 tcg_ctx.code_gen_buffer) /
1629 target_code_size : 0);
1630 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1631 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1632 tcg_ctx.tb_ctx.nb_tbs : 0);
1633 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1634 direct_jmp_count,
1635 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1636 tcg_ctx.tb_ctx.nb_tbs : 0,
1637 direct_jmp2_count,
1638 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1639 tcg_ctx.tb_ctx.nb_tbs : 0);
1640 cpu_fprintf(f, "\nStatistics:\n");
1641 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1642 cpu_fprintf(f, "TB invalidate count %d\n",
1643 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1644 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1645 tcg_dump_info(f, cpu_fprintf);
1646}
1647
1648#else /* CONFIG_USER_ONLY */
1649
1650void cpu_interrupt(CPUState *cpu, int mask)
1651{
1652 cpu->interrupt_request |= mask;
1653 cpu->tcg_exit_req = 1;
1654}
1655
1656/*
1657 * Walks guest process memory "regions" one by one
1658 * and calls callback function 'fn' for each region.
1659 */
1660struct walk_memory_regions_data {
1661 walk_memory_regions_fn fn;
1662 void *priv;
1663 uintptr_t start;
1664 int prot;
1665};
1666
1667static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1668 abi_ulong end, int new_prot)
1669{
1670 if (data->start != -1ul) {
1671 int rc = data->fn(data->priv, data->start, end, data->prot);
1672 if (rc != 0) {
1673 return rc;
1674 }
1675 }
1676
1677 data->start = (new_prot ? end : -1ul);
1678 data->prot = new_prot;
1679
1680 return 0;
1681}
1682
1683static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1684 abi_ulong base, int level, void **lp)
1685{
1686 abi_ulong pa;
1687 int i, rc;
1688
1689 if (*lp == NULL) {
1690 return walk_memory_regions_end(data, base, 0);
1691 }
1692
1693 if (level == 0) {
1694 PageDesc *pd = *lp;
1695
1696 for (i = 0; i < V_L2_SIZE; ++i) {
1697 int prot = pd[i].flags;
1698
1699 pa = base | (i << TARGET_PAGE_BITS);
1700 if (prot != data->prot) {
1701 rc = walk_memory_regions_end(data, pa, prot);
1702 if (rc != 0) {
1703 return rc;
1704 }
1705 }
1706 }
1707 } else {
1708 void **pp = *lp;
1709
1710 for (i = 0; i < V_L2_SIZE; ++i) {
1711 pa = base | ((abi_ulong)i <<
1712 (TARGET_PAGE_BITS + V_L2_BITS * level));
1713 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1714 if (rc != 0) {
1715 return rc;
1716 }
1717 }
1718 }
1719
1720 return 0;
1721}
1722
1723int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1724{
1725 struct walk_memory_regions_data data;
1726 uintptr_t i;
1727
1728 data.fn = fn;
1729 data.priv = priv;
1730 data.start = -1ul;
1731 data.prot = 0;
1732
1733 for (i = 0; i < V_L1_SIZE; i++) {
1734 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
1735 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1736
1737 if (rc != 0) {
1738 return rc;
1739 }
1740 }
1741
1742 return walk_memory_regions_end(&data, 0, 0);
1743}
1744
1745static int dump_region(void *priv, abi_ulong start,
1746 abi_ulong end, unsigned long prot)
1747{
1748 FILE *f = (FILE *)priv;
1749
1750 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
1751 " "TARGET_ABI_FMT_lx" %c%c%c\n",
1752 start, end, end - start,
1753 ((prot & PAGE_READ) ? 'r' : '-'),
1754 ((prot & PAGE_WRITE) ? 'w' : '-'),
1755 ((prot & PAGE_EXEC) ? 'x' : '-'));
1756
1757 return 0;
1758}
1759
1760/* dump memory mappings */
1761void page_dump(FILE *f)
1762{
1763 const int length = sizeof(abi_ulong) * 2;
1764 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1765 length, "start", length, "end", length, "size", "prot");
1766 walk_memory_regions(f, dump_region);
1767}
1768
1769int page_get_flags(target_ulong address)
1770{
1771 PageDesc *p;
1772
1773 p = page_find(address >> TARGET_PAGE_BITS);
1774 if (!p) {
1775 return 0;
1776 }
1777 return p->flags;
1778}
1779
1780/* Modify the flags of a page and invalidate the code if necessary.
1781 The flag PAGE_WRITE_ORG is positioned automatically depending
1782 on PAGE_WRITE. The mmap_lock should already be held. */
1783void page_set_flags(target_ulong start, target_ulong end, int flags)
1784{
1785 target_ulong addr, len;
1786
1787 /* This function should never be called with addresses outside the
1788 guest address space. If this assert fires, it probably indicates
1789 a missing call to h2g_valid. */
1790#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1791 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1792#endif
1793 assert(start < end);
1794
1795 start = start & TARGET_PAGE_MASK;
1796 end = TARGET_PAGE_ALIGN(end);
1797
1798 if (flags & PAGE_WRITE) {
1799 flags |= PAGE_WRITE_ORG;
1800 }
1801
1802 for (addr = start, len = end - start;
1803 len != 0;
1804 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1805 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1806
1807 /* If the write protection bit is set, then we invalidate
1808 the code inside. */
1809 if (!(p->flags & PAGE_WRITE) &&
1810 (flags & PAGE_WRITE) &&
1811 p->first_tb) {
1812 tb_invalidate_phys_page(addr, 0, NULL, false);
1813 }
1814 p->flags = flags;
1815 }
1816}
1817
1818int page_check_range(target_ulong start, target_ulong len, int flags)
1819{
1820 PageDesc *p;
1821 target_ulong end;
1822 target_ulong addr;
1823
1824 /* This function should never be called with addresses outside the
1825 guest address space. If this assert fires, it probably indicates
1826 a missing call to h2g_valid. */
1827#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1828 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1829#endif
1830
1831 if (len == 0) {
1832 return 0;
1833 }
1834 if (start + len - 1 < start) {
1835 /* We've wrapped around. */
1836 return -1;
1837 }
1838
1839 /* must do before we lose bits in the next step */
1840 end = TARGET_PAGE_ALIGN(start + len);
1841 start = start & TARGET_PAGE_MASK;
1842
1843 for (addr = start, len = end - start;
1844 len != 0;
1845 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1846 p = page_find(addr >> TARGET_PAGE_BITS);
1847 if (!p) {
1848 return -1;
1849 }
1850 if (!(p->flags & PAGE_VALID)) {
1851 return -1;
1852 }
1853
1854 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1855 return -1;
1856 }
1857 if (flags & PAGE_WRITE) {
1858 if (!(p->flags & PAGE_WRITE_ORG)) {
1859 return -1;
1860 }
1861 /* unprotect the page if it was put read-only because it
1862 contains translated code */
1863 if (!(p->flags & PAGE_WRITE)) {
1864 if (!page_unprotect(addr, 0, NULL)) {
1865 return -1;
1866 }
1867 }
1868 }
1869 }
1870 return 0;
1871}
1872
1873/* called from signal handler: invalidate the code and unprotect the
1874 page. Return TRUE if the fault was successfully handled. */
1875int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1876{
1877 unsigned int prot;
1878 PageDesc *p;
1879 target_ulong host_start, host_end, addr;
1880
1881 /* Technically this isn't safe inside a signal handler. However we
1882 know this only ever happens in a synchronous SEGV handler, so in
1883 practice it seems to be ok. */
1884 mmap_lock();
1885
1886 p = page_find(address >> TARGET_PAGE_BITS);
1887 if (!p) {
1888 mmap_unlock();
1889 return 0;
1890 }
1891
1892 /* if the page was really writable, then we change its
1893 protection back to writable */
1894 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1895 host_start = address & qemu_host_page_mask;
1896 host_end = host_start + qemu_host_page_size;
1897
1898 prot = 0;
1899 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1900 p = page_find(addr >> TARGET_PAGE_BITS);
1901 p->flags |= PAGE_WRITE;
1902 prot |= p->flags;
1903
1904 /* and since the content will be modified, we must invalidate
1905 the corresponding translated code. */
1906 tb_invalidate_phys_page(addr, pc, puc, true);
1907#ifdef DEBUG_TB_CHECK
1908 tb_invalidate_check(addr);
1909#endif
1910 }
1911 mprotect((void *)g2h(host_start), qemu_host_page_size,
1912 prot & PAGE_BITS);
1913
1914 mmap_unlock();
1915 return 1;
1916 }
1917 mmap_unlock();
1918 return 0;
1919}
1920#endif /* CONFIG_USER_ONLY */