/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "config.h"

#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

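/* Worked example (illustrative): with TARGET_PAGE_BITS 12 and a 32-bit
 * L1_MAP_ADDR_SPACE_BITS, 20 bits of page index remain.  20 % 10 == 0,
 * so V_L1_BITS_REM is 0 (< 4) and V_L1_BITS becomes 10: l1_map has
 * 1024 entries, each pointing at a 1024-entry leaf table of PageDesc,
 * and V_L1_SHIFT = 32 - 12 - 10 = 10 is the number of page-index bits
 * consumed by the levels below the L1 table.
 */
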
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
    }

    return p - block;
}
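
/* Illustrative stream (assuming TARGET_INSN_START_WORDS == 1): a TB at
 * guest pc 0x1000 holding insns at 0x1000 and 0x1004, whose generated
 * code ends at host offsets 0x20 and 0x48, has the logical table
 *     { 0x1000, 0x20 }
 *     { 0x1004, 0x48 }
 * and is stored as the sleb128 deltas 0, 0x20, 4, 0x28.
 */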

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

#ifdef _WIN32
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static __attribute__((unused)) void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
}

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    tcg_ctx.code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
}
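
/* Example (illustrative): buf = 0x0ff00000, size = 0x200000.
 * buf + size = 0x10100000; 0x0ff00000 ^ 0x10100000 = 0x1fe00000, and
 * 0x1fe00000 & 0xf0000000 = 0x10000000 != 0, so the buffer straddles
 * the 256MB boundary at 0x10000000 and cross_256mb() returns true.
 */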

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
    }
#endif
    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too.  */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.  */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);

    if (buf == NULL) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
        if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
            /* Success!  Use the new buffer.  */
            g_free(buf);
            buf = buf2;
        } else {
            /* Failure.  Work with what we had.  Since this is malloc
               and not mmap, we can't free the other half.  */
            g_free(buf2);
            buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
        }
    }
#endif

    map_exec(buf, tcg_ctx.code_gen_buffer_size);
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
            tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
            CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
            g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
        (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
         tcg_ctx.code_gen_buffer_max_size) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs.  */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr -
                            tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
           sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
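
/* Note on the '& 3' tagging above: the pointers in a page's TB list
 * carry their metadata in the low two bits.  A TB that spans two pages
 * is linked into both pages' lists, and the tag records which of the
 * TB's page_next[]/page_addr[] slots (0 or 1) this particular link
 * belongs to.  E.g. (illustrative) a TB at address 0x7f0010 linked via
 * its second page is stored in that list as 0x7f0011.
 */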

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
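
/* The jump lists use the same low-bit tagging: jmp_next[n] links form a
 * circular list of all TBs whose jump n targets a given TB, closed by
 * an entry tagged with 2 that lives in the target's jmp_first.  That is
 * why the walk above follows jmp_first when it sees n1 == 2 and
 * jmp_next[n1] otherwise.
 */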

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
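
/* Illustrative example: with a 4KB target page, a TB whose code starts
 * at page offset 0xff0 and covers 0x40 guest bytes sets bits
 * 0xff0..0xfff of this page's bitmap (the overhang is clamped), and,
 * when this runs for the following page, the n == 1 arm sets bits
 * 0x000..0x02f of that page's bitmap.
 */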

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount) {
        cflags |= CF_USE_ICOUNT;
    }
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tcg_ctx.tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
    tcg_ctx.tb_next = NULL;
#else
    tcg_ctx.tb_jmp_offset = NULL;
    tcg_ctx.tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution.  We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory.  It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
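
/* Illustrative fast-path check: for an 8-byte write at page offset
 * nr = 0x110 on a 64-bit host, BIT_WORD(nr) selects word 4 of the
 * bitmap and the shift brings bit 0x110 down to bit 0.  If none of the
 * low 8 bits are then set, no translated code overlaps the write and
 * the expensive range invalidation is skipped entirely.
 */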

#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution.  We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory.  It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected.  So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables.  phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr.  Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
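
/* tbs[] is filled in the order blocks are emitted into code_gen_buffer,
 * so their tc_ptr values are ascending and a binary search is valid.
 * When the loop exits without an exact match, tbs[m_max] is the last
 * block starting at or below tc_ptr, i.e. the one containing that host
 * pc.  E.g. (illustrative) with starts { 0x100, 0x180, 0x240 }, a
 * tc_ptr of 0x1f0 resolves to the block at 0x180.
 */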

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it.  Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        int flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}
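
/* Regions are emitted lazily: data->start == -1u means "no open
 * region".  E.g. (illustrative) pages [0x1000, 0x3000) mapped r-x and
 * [0x3000, 0x4000) mapped rw- produce exactly two callbacks, one per
 * run of identical protection bits, because the walk only calls this
 * helper when the prot value changes.
 */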

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled.  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code.  */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */