/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

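/* Worked example, assuming L1_MAP_ADDR_SPACE_BITS == 32 and
 * TARGET_PAGE_BITS == 12: 20 index bits remain, so V_L1_BITS_REM == 0,
 * V_L1_BITS == 10 and V_L1_SHIFT == 10.  l1_map then holds 1024 entries,
 * each pointing to an L2 table of 1024 PageDescs, covering all 2^20
 * pages with no intermediate levels.  Larger address spaces simply add
 * more V_L2_BITS-wide levels between the two.
 */
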
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

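/* Worked example of the sleb128 encoding implemented above: 127 encodes
 * to { 0xff, 0x00 }, since the 0x40 sign bit of the first byte is set
 * and a terminating byte is required, while -123 encodes to
 * { 0x85, 0x7f }.  Decoding accumulates 7 bits per byte and sign-extends
 * from the 0x40 bit of the final byte.
 */
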
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

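/* For instance (assuming TARGET_INSN_START_WORDS == 1), a TB at guest pc
 * 0x1000 with two insns whose insn_start words are { 0x1000 } and
 * { 0x1004 }, ending at host code offsets 0x20 and 0x37, is logically
 * the rows { 0x1000, 0x20 } and { 0x1004, 0x37 }, and is stored as the
 * sleb128-encoded deltas { 0, 0x20 } and { 4, 0x17 }.
 */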
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
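
/* For example, on a host with 4KB pages emulating a target with 8KB
   pages, qemu_host_page_size is raised to 8KB and qemu_host_page_mask
   becomes -8192, which clears the low 13 bits of an address. */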

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

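/* For example, in a user-mode build (where the static buffer is selected
 * above), a zero tb_size request on an x86-64 host resolves to
 * DEFAULT_CODE_GEN_BUFFER_SIZE, i.e. 32MB, which sits between the 1MB
 * minimum and the 2GB direct-branch limit.
 */
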
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
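
/* Example: a buffer at 0x0ff00000 with size 0x200000 ends at 0x10100000.
   XOR-ing start and end leaves bit 28 set, which survives the ~0x0fffffff
   mask, so the buffer crosses the 256MB boundary at 0x10000000 and the
   test returns true. */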

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
    if (!tcg_enabled()) {
        return;
    }
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
        cpu->tb_flushed = true;
    }

    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

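/* Note on the tagged-pointer convention used by these lists (and by
 * tb_page_remove above): the two low bits of each list word encode which
 * of a TB's two slots the link belongs to.  For example, a page_next
 * entry of ((uintptr_t)tb | 1) means "slot 1 of tb", and a tag of 2 in
 * the jump lists marks jmp_list_first, i.e. the end of the circular
 * list.  Masking with ~3 recovers the TranslationBlock pointer.
 */
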
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
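/* For instance, a TB whose guest code begins 8 bytes before a page
 * boundary spills onto the next page: tb_gen_code() passes that second
 * page as phys_page2, the TB is linked into both pages' lists, and
 * invalidating either page will find it.
 */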
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
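
/* Example of the bitmap fast path above: a 4-byte write at page offset
   0x100 gives nr == 0x100, so bits 256..259 of code_bitmap are tested.
   If none is set, no translated code overlaps the written bytes and the
   expensive tb_invalidate_phys_page_range() call is skipped. */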
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        return true;
    }
#endif
    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
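
/* For example, with three TBs whose tc_ptr values are 0x1000, 0x1800 and
   0x2300, searching for tc_ptr 0x1c00 narrows m_max down to 1 and returns
   tbs[1]: the TB whose generated code starts at 0x1800 is the one that
   contains the searched host PC. */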
1538
ec53b45b 1539#if !defined(CONFIG_USER_ONLY)
29d8ec7b 1540void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
5b6dd868
BS
1541{
1542 ram_addr_t ram_addr;
5c8a00ce 1543 MemoryRegion *mr;
149f54b5 1544 hwaddr l = 1;
5b6dd868 1545
41063e1e 1546 rcu_read_lock();
29d8ec7b 1547 mr = address_space_translate(as, addr, &addr, &l, false);
5c8a00ce
PB
1548 if (!(memory_region_is_ram(mr)
1549 || memory_region_is_romd(mr))) {
41063e1e 1550 rcu_read_unlock();
5b6dd868
BS
1551 return;
1552 }
e4e69794 1553 ram_addr = memory_region_get_ram_addr(mr) + addr;
5b6dd868 1554 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
41063e1e 1555 rcu_read_unlock();
5b6dd868 1556}
ec53b45b 1557#endif /* !defined(CONFIG_USER_ONLY) */
5b6dd868 1558
239c51a5 1559void tb_check_watchpoint(CPUState *cpu)
5b6dd868
BS
1560{
1561 TranslationBlock *tb;
1562
93afeade 1563 tb = tb_find_pc(cpu->mem_io_pc);
8d302e76
AJ
1564 if (tb) {
1565 /* We can use retranslation to find the PC. */
1566 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1567 tb_phys_invalidate(tb, -1);
1568 } else {
1569 /* The exception probably happened in a helper. The CPU state should
1570 have been saved before calling it. Fetch the PC from there. */
1571 CPUArchState *env = cpu->env_ptr;
1572 target_ulong pc, cs_base;
1573 tb_page_addr_t addr;
89fee74a 1574 uint32_t flags;
8d302e76
AJ
1575
1576 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1577 addr = get_page_addr_code(env, pc);
1578 tb_invalidate_phys_range(addr, addr + 1);
5b6dd868 1579 }
5b6dd868
BS
1580}
1581
1582#ifndef CONFIG_USER_ONLY
5b6dd868
BS
1583/* in deterministic execution mode, instructions doing device I/Os
1584 must be at the end of the TB */
90b40a69 1585void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
5b6dd868 1586{
a47dddd7 1587#if defined(TARGET_MIPS) || defined(TARGET_SH4)
90b40a69 1588 CPUArchState *env = cpu->env_ptr;
a47dddd7 1589#endif
5b6dd868
BS
1590 TranslationBlock *tb;
1591 uint32_t n, cflags;
1592 target_ulong pc, cs_base;
89fee74a 1593 uint32_t flags;
5b6dd868
BS
1594
1595 tb = tb_find_pc(retaddr);
1596 if (!tb) {
a47dddd7 1597 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
5b6dd868
BS
1598 (void *)retaddr);
1599 }
28ecfd7a 1600 n = cpu->icount_decr.u16.low + tb->icount;
74f10515 1601 cpu_restore_state_from_tb(cpu, tb, retaddr);
5b6dd868
BS
1602 /* Calculate how many instructions had been executed before the fault
1603 occurred. */
28ecfd7a 1604 n = n - cpu->icount_decr.u16.low;
5b6dd868
BS
1605 /* Generate a new TB ending on the I/O insn. */
1606 n++;
1607 /* On MIPS and SH, delay slot instructions can only be restarted if
1608 they were already the first instruction in the TB. If this is not
1609 the first instruction in a TB then re-execute the preceding
1610 branch. */
1611#if defined(TARGET_MIPS)
1612 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
c3577479 1613 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
28ecfd7a 1614 cpu->icount_decr.u16.low++;
5b6dd868
BS
1615 env->hflags &= ~MIPS_HFLAG_BMASK;
1616 }
1617#elif defined(TARGET_SH4)
1618 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1619 && n > 1) {
1620 env->pc -= 2;
28ecfd7a 1621 cpu->icount_decr.u16.low++;
5b6dd868
BS
1622 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1623 }
1624#endif
1625 /* This should never happen. */
1626 if (n > CF_COUNT_MASK) {
a47dddd7 1627 cpu_abort(cpu, "TB too big during recompile");
5b6dd868
BS
1628 }
1629
1630 cflags = n | CF_LAST_IO;
1631 pc = tb->pc;
1632 cs_base = tb->cs_base;
1633 flags = tb->flags;
1634 tb_phys_invalidate(tb, -1);
02d57ea1
SF
1635 if (tb->cflags & CF_NOCACHE) {
1636 if (tb->orig_tb) {
1637 /* Invalidate original TB if this TB was generated in
1638 * cpu_exec_nocache() */
1639 tb_phys_invalidate(tb->orig_tb, -1);
1640 }
1641 tb_free(tb);
1642 }
5b6dd868
BS
1643 /* FIXME: In theory this could raise an exception. In practice
1644 we have already translated the block once so it's probably ok. */
648f034c 1645 tb_gen_code(cpu, pc, cs_base, flags, cflags);
5b6dd868
BS
1646 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1647 the first in the TB) then we end up generating a whole new TB and
1648 repeating the fault, which is horribly inefficient.
1649 Better would be to execute just this insn uncached, or generate a
1650 second new TB. */
6886b980 1651 cpu_loop_exit_noexc(cpu);
5b6dd868
BS
1652}
1653
611d4f99 1654void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
5b6dd868
BS
1655{
1656 unsigned int i;
1657
1658 /* Discard jump cache entries for any tb which might potentially
1659 overlap the flushed page. */
1660 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
8cd70437 1661 memset(&cpu->tb_jmp_cache[i], 0,
5b6dd868
BS
1662 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1663
1664 i = tb_jmp_cache_hash_page(addr);
8cd70437 1665 memset(&cpu->tb_jmp_cache[i], 0,
5b6dd868
BS
1666 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1667}
1668
7266ae91
EC
1669static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1670 struct qht_stats hst)
1671{
1672 uint32_t hgram_opts;
1673 size_t hgram_bins;
1674 char *hgram;
1675
1676 if (!hst.head_buckets) {
1677 return;
1678 }
1679 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1680 hst.used_head_buckets, hst.head_buckets,
1681 (double)hst.used_head_buckets / hst.head_buckets * 100);
1682
1683 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1684 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1685 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1686 hgram_opts |= QDIST_PR_NODECIMAL;
1687 }
1688 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1689 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1690 qdist_avg(&hst.occupancy) * 100, hgram);
1691 g_free(hgram);
1692
1693 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1694 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1695 if (hgram_bins > 10) {
1696 hgram_bins = 10;
1697 } else {
1698 hgram_bins = 0;
1699 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1700 }
1701 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1702 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1703 qdist_avg(&hst.chain), hgram);
1704 g_free(hgram);
1705}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                tcg_ctx.code_gen_buffer) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                tcg_ctx.code_gen_buffer) /
                target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
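/* Worked example for the "expansion ratio" reported above (numbers
 * invented for illustration; the formula is the one computed in the
 * function): with code_gen_ptr - code_gen_buffer == 64 MB of generated
 * host code and target_code_size == 8 MB of guest code, the ratio is
 * 64/8 = 8.0, i.e. each guest byte cost eight host bytes of translated
 * code on average.  Ratios are normally greater than 1 because each
 * guest instruction expands into several host instructions. */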

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                                       (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
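/* Example (a sketch, not part of the original file): a minimal
 * walk_memory_regions() callback that counts executable regions.  The
 * names "count_exec" and "n_exec" are hypothetical; the signature
 * matches walk_memory_regions_fn as used by dump_region() below.
 *
 *     static int count_exec(void *priv, target_ulong start,
 *                           target_ulong end, unsigned long prot)
 *     {
 *         if (prot & PAGE_EXEC) {
 *             ++*(int *)priv;
 *         }
 *         return 0;
 *     }
 *
 *     int n_exec = 0;
 *     walk_memory_regions(&n_exec, count_exec);
 *
 * A non-zero return from the callback aborts the walk and is
 * propagated back to the walk_memory_regions() caller. */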

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically whenever PAGE_WRITE
   is set. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}
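/* Example usage (a sketch; PAGE_VALID/PAGE_READ/PAGE_WRITE are real
 * page flags, but "guest_start" and "guest_len" are hypothetical
 * names): marking a freshly mapped guest range read/write, with the
 * required lock held:
 *
 *     mmap_lock();
 *     page_set_flags(guest_start, guest_start + guest_len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *     mmap_unlock();
 *
 * Because PAGE_WRITE is included, PAGE_WRITE_ORG is added
 * automatically, which later lets page_unprotect() restore
 * writability after the page was write-protected to guard
 * translated code. */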

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
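/* Example (a sketch with hypothetical names "guest_addr" and "size"):
 * validating a guest buffer before emulated code writes to it:
 *
 *     if (page_check_range(guest_addr, size, PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * A 0 return means every page in the range is valid and grants the
 * requested access; -1 means at least one page does not. */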

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
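/* Example (a sketch, not from this file): how a user-mode SEGV handler
 * might act on the return value above; "cpu", "guest_addr" and
 * "host_pc" are hypothetical names for state the handler already has:
 *
 *     switch (page_unprotect(guest_addr, (uintptr_t)host_pc)) {
 *     case 2:
 *         cpu_loop_exit_noexc(cpu);
 *     case 1:
 *         return 1;
 *     default:
 *         break;
 *     }
 *
 * Case 2 never falls through because cpu_loop_exit_noexc() longjmps
 * back to the main loop; case 1 tells the caller the faulting access
 * can simply be retried; 0 means a genuine guest fault that must be
 * delivered to the guest. */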
#endif /* CONFIG_USER_ONLY */