/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* #define DEBUG_LOCKING */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency. This is automatic for SoftMMU based system
 * emulation due to its single-threaded nature. In user-mode emulation
 * access to the memory-related structures is protected with the
 * mmap_lock.
 */
#ifdef DEBUG_LOCKING
#define DEBUG_MEM_LOCKS 1
#else
#define DEBUG_MEM_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() do { /* nothing */ } while (0)
#else
#define assert_memory_lock() do {               \
        if (DEBUG_MEM_LOCKS) {                  \
            g_assert(have_mmap_lock());         \
        }                                       \
    } while (0)
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

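/* Worked example (illustrative only, not used by the code): with
 * TARGET_PAGE_BITS == 12, V_L2_BITS == 10 and L1_MAP_ADDR_SPACE_BITS == 32,
 * the remainder (32 - 12) % 10 == 0 is below V_L1_MIN_BITS, so v_l1_bits
 * becomes 10: a 1024-entry L1 table, v_l1_shift == 10, v_l2_levels == 0,
 * and the bottom level is a 1024-entry PageDesc array (10 + 10 == 20 bits
 * of page index).  With a 47-bit user-mode address space instead,
 * (47 - 12) % 10 == 5, so v_l1_size == 32, v_l1_shift == 30 and
 * v_l2_levels == 2 (5 + 10 + 10 + 10 == 35 bits of page index).
 */
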
void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

#ifdef DEBUG_LOCKING
#define DEBUG_TB_LOCKS 1
#else
#define DEBUG_TB_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
#define assert_tb_lock() do { /* nothing */ } while (0)
#else
#define assert_tb_lock() do {               \
        if (DEBUG_TB_LOCKS) {               \
            g_assert(have_tb_lock);         \
        }                                   \
    } while (0)
#endif


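/* Typical usage sketch (illustrative only): code that mutates the TB
 * structures takes tb_lock first, so that the assert_tb_lock() checks in
 * the helpers below hold, e.g.
 *
 *     tb_lock();
 *     tb_phys_invalidate(tb, -1);
 *     tb_free(tb);
 *     tb_unlock();
 *
 * Likewise, user-mode callers of page_find_alloc() and tb_link_page() are
 * expected to hold mmap_lock(), which is what assert_memory_lock() checks.
 */
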
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

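/* Worked example (illustrative only): encode_sleb128(p, 300) emits the two
 * bytes 0xac 0x02 (300 == 0b1_0010_1100: the low seven bits 0101100 with the
 * continuation bit set, then 0000010), while -2 encodes as the single byte
 * 0x7e.  Decoding 0x7e stops after one byte with shift == 7; bit 6 of that
 * byte is set, so the value is sign-extended back to -2.  A round trip:
 *
 *     uint8_t buf[16], *p = buf;
 *     encode_sleb128(buf, -2);
 *     assert(decode_sleb128(&p) == -2);
 */
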
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

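/* Note (illustrative only): negating a power-of-two page size yields the
 * corresponding alignment mask, e.g. with a 4 KiB host page
 * -(intptr_t)4096 == ~0xfff, so "addr & qemu_host_page_mask" rounds addr
 * down to a host page boundary.
 */
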
static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

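/* Worked example (illustrative only): a 4 MiB buffer starting at 0x0ff00000
 * ends at 0x10300000, so cross_256mb() reports a crossing (the XOR of the
 * two addresses has bits set above bit 27).  split_cross_256mb() then
 * compares the 1 MiB below the 0x10000000 boundary with the 3 MiB above it
 * and keeps the larger half: it returns 0x10000000 and sets
 * tcg_ctx.code_gen_buffer_size to 3 MiB.
 */
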
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/*
 * Allocate a new translation block.  Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_lock();

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_lock();

    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs.  */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, void *data)
{
    unsigned tb_flush_req = (unsigned) (uintptr_t) data;

    tb_lock();

    /* If it's already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

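/* A note on the tagged pointers used above and in the jump lists below:
 * the two low bits of a TranslationBlock pointer stored in first_tb or
 * page_next[] encode which of the TB's (at most two) pages the link
 * belongs to, e.g.
 *
 *     p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);    n is 0 or 1
 *     n1  = (uintptr_t)tb1 & 3;                                 recover n
 *     tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);          recover tb1
 *
 * The jump lists use the same encoding, with the low bits holding the jump
 * slot number and the value 2 marking the list head (tb_gen_code() seeds
 * tb->jmp_list_first with (uintptr_t)tb | 2).
 */
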
/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_lock();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

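/* The lookup side of this hash table lives in cpu-exec.c; a sketch of what
 * it does (assuming the qht_lookup() API from util/qht.c):
 *
 *     h  = tb_hash_func(phys_pc, pc, flags);
 *     tb = qht_lookup(&tcg_ctx.tb_ctx.htable, cmp_fn, &desc, h);
 *
 * where cmp_fn and desc stand for a comparison callback and a descriptor
 * holding pc, cs_base, flags and the physical page(s), so only a TB whose
 * key fields all match is returned.
 */
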
/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();

    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

6fad459c 1520#ifdef CONFIG_SOFTMMU
5b6dd868
BS
1521/* len must be <= 8 and start must be a multiple of len */
1522void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1523{
1524 PageDesc *p;
5b6dd868
BS
1525
1526#if 0
1527 if (1) {
1528 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1529 cpu_single_env->mem_io_vaddr, len,
1530 cpu_single_env->eip,
1531 cpu_single_env->eip +
1532 (intptr_t)cpu_single_env->segs[R_CS].base);
1533 }
1534#endif
1535 p = page_find(start >> TARGET_PAGE_BITS);
1536 if (!p) {
1537 return;
1538 }
fc377bcf
PB
1539 if (!p->code_bitmap &&
1540 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
7d7500d9
PB
1541 /* build code bitmap. FIXME: writes should be protected by
1542 * tb_lock, reads by tb_lock or RCU.
1543 */
fc377bcf
PB
1544 build_page_bitmap(p);
1545 }
5b6dd868 1546 if (p->code_bitmap) {
510a647f
EC
1547 unsigned int nr;
1548 unsigned long b;
1549
1550 nr = start & ~TARGET_PAGE_MASK;
1551 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
5b6dd868
BS
1552 if (b & ((1 << len) - 1)) {
1553 goto do_invalidate;
1554 }
1555 } else {
1556 do_invalidate:
1557 tb_invalidate_phys_page_range(start, start + len, 1);
1558 }
1559}
6fad459c 1560#else
75809229
PM
1561/* Called with mmap_lock held. If pc is not 0 then it indicates the
1562 * host PC of the faulting store instruction that caused this invalidate.
1563 * Returns true if the caller needs to abort execution of the current
1564 * TB (because it was modified by this store and the guest CPU has
1565 * precise-SMC semantics).
1566 */
1567static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
5b6dd868
BS
1568{
1569 TranslationBlock *tb;
1570 PageDesc *p;
1571 int n;
1572#ifdef TARGET_HAS_PRECISE_SMC
1573 TranslationBlock *current_tb = NULL;
4917cf44
AF
1574 CPUState *cpu = current_cpu;
1575 CPUArchState *env = NULL;
5b6dd868
BS
1576 int current_tb_modified = 0;
1577 target_ulong current_pc = 0;
1578 target_ulong current_cs_base = 0;
89fee74a 1579 uint32_t current_flags = 0;
5b6dd868
BS
1580#endif
1581
1582 addr &= TARGET_PAGE_MASK;
1583 p = page_find(addr >> TARGET_PAGE_BITS);
1584 if (!p) {
75809229 1585 return false;
5b6dd868
BS
1586 }
1587 tb = p->first_tb;
1588#ifdef TARGET_HAS_PRECISE_SMC
1589 if (tb && pc != 0) {
1590 current_tb = tb_find_pc(pc);
1591 }
4917cf44
AF
1592 if (cpu != NULL) {
1593 env = cpu->env_ptr;
d77953b9 1594 }
5b6dd868
BS
1595#endif
1596 while (tb != NULL) {
1597 n = (uintptr_t)tb & 3;
1598 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1599#ifdef TARGET_HAS_PRECISE_SMC
1600 if (current_tb == tb &&
1601 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1602 /* If we are modifying the current TB, we must stop
1603 its execution. We could be more precise by checking
1604 that the modification is after the current PC, but it
1605 would require a specialized function to partially
1606 restore the CPU state */
1607
1608 current_tb_modified = 1;
74f10515 1609 cpu_restore_state_from_tb(cpu, current_tb, pc);
5b6dd868
BS
1610 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1611 &current_flags);
1612 }
1613#endif /* TARGET_HAS_PRECISE_SMC */
1614 tb_phys_invalidate(tb, addr);
1615 tb = tb->page_next[n];
1616 }
1617 p->first_tb = NULL;
1618#ifdef TARGET_HAS_PRECISE_SMC
1619 if (current_tb_modified) {
1620 /* we generate a block containing just the instruction
1621 modifying the memory. It will ensure that it cannot modify
1622 itself */
648f034c 1623 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
75809229 1624 return true;
5b6dd868
BS
1625 }
1626#endif
75809229 1627 return false;
5b6dd868
BS
1628}
1629#endif
1630
5b6dd868
BS
1631/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1632 tb[1].tc_ptr. Return NULL if not found */
a8a826a3 1633static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
5b6dd868
BS
1634{
1635 int m_min, m_max, m;
1636 uintptr_t v;
1637 TranslationBlock *tb;
1638
5e5f07e0 1639 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
5b6dd868
BS
1640 return NULL;
1641 }
0b0d3320
EV
1642 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1643 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
5b6dd868
BS
1644 return NULL;
1645 }
1646 /* binary search (cf Knuth) */
1647 m_min = 0;
5e5f07e0 1648 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
5b6dd868
BS
1649 while (m_min <= m_max) {
1650 m = (m_min + m_max) >> 1;
5e5f07e0 1651 tb = &tcg_ctx.tb_ctx.tbs[m];
5b6dd868
BS
1652 v = (uintptr_t)tb->tc_ptr;
1653 if (v == tc_ptr) {
1654 return tb;
1655 } else if (tc_ptr < v) {
1656 m_max = m - 1;
1657 } else {
1658 m_min = m + 1;
1659 }
1660 }
5e5f07e0 1661 return &tcg_ctx.tb_ctx.tbs[m_max];
5b6dd868
BS
1662}
1663
ec53b45b 1664#if !defined(CONFIG_USER_ONLY)
29d8ec7b 1665void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
5b6dd868
BS
1666{
1667 ram_addr_t ram_addr;
5c8a00ce 1668 MemoryRegion *mr;
149f54b5 1669 hwaddr l = 1;
5b6dd868 1670
41063e1e 1671 rcu_read_lock();
29d8ec7b 1672 mr = address_space_translate(as, addr, &addr, &l, false);
5c8a00ce
PB
1673 if (!(memory_region_is_ram(mr)
1674 || memory_region_is_romd(mr))) {
41063e1e 1675 rcu_read_unlock();
5b6dd868
BS
1676 return;
1677 }
e4e69794 1678 ram_addr = memory_region_get_ram_addr(mr) + addr;
5b6dd868 1679 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
41063e1e 1680 rcu_read_unlock();
5b6dd868 1681}
ec53b45b 1682#endif /* !defined(CONFIG_USER_ONLY) */
5b6dd868 1683
7d7500d9 1684/* Called with tb_lock held. */
239c51a5 1685void tb_check_watchpoint(CPUState *cpu)
5b6dd868
BS
1686{
1687 TranslationBlock *tb;
1688
93afeade 1689 tb = tb_find_pc(cpu->mem_io_pc);
8d302e76
AJ
1690 if (tb) {
1691 /* We can use retranslation to find the PC. */
1692 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1693 tb_phys_invalidate(tb, -1);
1694 } else {
1695 /* The exception probably happened in a helper. The CPU state should
1696 have been saved before calling it. Fetch the PC from there. */
1697 CPUArchState *env = cpu->env_ptr;
1698 target_ulong pc, cs_base;
1699 tb_page_addr_t addr;
89fee74a 1700 uint32_t flags;
8d302e76
AJ
1701
1702 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1703 addr = get_page_addr_code(env, pc);
1704 tb_invalidate_phys_range(addr, addr + 1);
5b6dd868 1705 }
5b6dd868
BS
1706}
1707
1708#ifndef CONFIG_USER_ONLY
5b6dd868
BS
1709/* in deterministic execution mode, instructions doing device I/Os
1710 must be at the end of the TB */
90b40a69 1711void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
5b6dd868 1712{
a47dddd7 1713#if defined(TARGET_MIPS) || defined(TARGET_SH4)
90b40a69 1714 CPUArchState *env = cpu->env_ptr;
a47dddd7 1715#endif
5b6dd868
BS
1716 TranslationBlock *tb;
1717 uint32_t n, cflags;
1718 target_ulong pc, cs_base;
89fee74a 1719 uint32_t flags;
5b6dd868
BS
1720
1721 tb = tb_find_pc(retaddr);
1722 if (!tb) {
a47dddd7 1723 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
5b6dd868
BS
1724 (void *)retaddr);
1725 }
28ecfd7a 1726 n = cpu->icount_decr.u16.low + tb->icount;
74f10515 1727 cpu_restore_state_from_tb(cpu, tb, retaddr);
5b6dd868
BS
1728 /* Calculate how many instructions had been executed before the fault
1729 occurred. */
28ecfd7a 1730 n = n - cpu->icount_decr.u16.low;
5b6dd868
BS
1731 /* Generate a new TB ending on the I/O insn. */
1732 n++;
1733 /* On MIPS and SH, delay slot instructions can only be restarted if
1734 they were already the first instruction in the TB. If this is not
1735 the first instruction in a TB then re-execute the preceding
1736 branch. */
1737#if defined(TARGET_MIPS)
1738 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
c3577479 1739 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
28ecfd7a 1740 cpu->icount_decr.u16.low++;
5b6dd868
BS
1741 env->hflags &= ~MIPS_HFLAG_BMASK;
1742 }
1743#elif defined(TARGET_SH4)
1744 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1745 && n > 1) {
1746 env->pc -= 2;
28ecfd7a 1747 cpu->icount_decr.u16.low++;
5b6dd868
BS
1748 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1749 }
1750#endif
1751 /* This should never happen. */
1752 if (n > CF_COUNT_MASK) {
a47dddd7 1753 cpu_abort(cpu, "TB too big during recompile");
5b6dd868
BS
1754 }
1755
1756 cflags = n | CF_LAST_IO;
1757 pc = tb->pc;
1758 cs_base = tb->cs_base;
1759 flags = tb->flags;
1760 tb_phys_invalidate(tb, -1);
02d57ea1
SF
1761 if (tb->cflags & CF_NOCACHE) {
1762 if (tb->orig_tb) {
1763 /* Invalidate original TB if this TB was generated in
1764 * cpu_exec_nocache() */
1765 tb_phys_invalidate(tb->orig_tb, -1);
1766 }
1767 tb_free(tb);
1768 }
5b6dd868
BS
1769 /* FIXME: In theory this could raise an exception. In practice
1770 we have already translated the block once so it's probably ok. */
648f034c 1771 tb_gen_code(cpu, pc, cs_base, flags, cflags);
5b6dd868
BS
1772 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1773 the first in the TB) then we end up generating a whole new TB and
1774 repeating the fault, which is horribly inefficient.
1775 Better would be to execute just this insn uncached, or generate a
1776 second new TB. */
6886b980 1777 cpu_loop_exit_noexc(cpu);
5b6dd868
BS
1778}
1779
611d4f99 1780void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
5b6dd868
BS
1781{
1782 unsigned int i;
1783
 1784     /* Discard jump cache entries for any tb which might overlap the
 1785        flushed page. */
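    /* A TB covers at most two guest pages, so a TB whose pc lies near the end
     * of the preceding page can still run into the flushed page; its jump
     * cache slot is hashed from that pc in the preceding page, hence both
     * hash pages are cleared. */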
1786 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
8cd70437 1787 memset(&cpu->tb_jmp_cache[i], 0,
5b6dd868
BS
1788 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1789
1790 i = tb_jmp_cache_hash_page(addr);
8cd70437 1791 memset(&cpu->tb_jmp_cache[i], 0,
5b6dd868
BS
1792 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1793}
1794
7266ae91
EC
1795static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1796 struct qht_stats hst)
1797{
1798 uint32_t hgram_opts;
1799 size_t hgram_bins;
1800 char *hgram;
1801
1802 if (!hst.head_buckets) {
1803 return;
1804 }
1805 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1806 hst.used_head_buckets, hst.head_buckets,
1807 (double)hst.used_head_buckets / hst.head_buckets * 100);
1808
1809 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1810 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1811 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1812 hgram_opts |= QDIST_PR_NODECIMAL;
1813 }
1814 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1815 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1816 qdist_avg(&hst.occupancy) * 100, hgram);
1817 g_free(hgram);
1818
1819 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1820 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1821 if (hgram_bins > 10) {
1822 hgram_bins = 10;
1823 } else {
1824 hgram_bins = 0;
1825 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1826 }
1827 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1828 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1829 qdist_avg(&hst.chain), hgram);
1830 g_free(hgram);
1831}
1832
5b6dd868
BS
1833void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1834{
1835 int i, target_code_size, max_target_code_size;
1836 int direct_jmp_count, direct_jmp2_count, cross_page;
1837 TranslationBlock *tb;
329844d4 1838 struct qht_stats hst;
5b6dd868
BS
1839
1840 target_code_size = 0;
1841 max_target_code_size = 0;
1842 cross_page = 0;
1843 direct_jmp_count = 0;
1844 direct_jmp2_count = 0;
5e5f07e0
EV
1845 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1846 tb = &tcg_ctx.tb_ctx.tbs[i];
5b6dd868
BS
1847 target_code_size += tb->size;
1848 if (tb->size > max_target_code_size) {
1849 max_target_code_size = tb->size;
1850 }
1851 if (tb->page_addr[1] != -1) {
1852 cross_page++;
1853 }
f309101c 1854 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
5b6dd868 1855 direct_jmp_count++;
f309101c 1856 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
5b6dd868
BS
1857 direct_jmp2_count++;
1858 }
1859 }
1860 }
1861 /* XXX: avoid using doubles ? */
1862 cpu_fprintf(f, "Translation buffer state:\n");
1863 cpu_fprintf(f, "gen code size %td/%zd\n",
0b0d3320 1864 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
b125f9dc 1865 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
5b6dd868 1866 cpu_fprintf(f, "TB count %d/%d\n",
5e5f07e0 1867 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
5b6dd868 1868 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
5e5f07e0
EV
1869 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1870 tcg_ctx.tb_ctx.nb_tbs : 0,
1871 max_target_code_size);
5b6dd868 1872 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
5e5f07e0
EV
1873 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1874 tcg_ctx.code_gen_buffer) /
1875 tcg_ctx.tb_ctx.nb_tbs : 0,
1876 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1877 tcg_ctx.code_gen_buffer) /
1878 target_code_size : 0);
1879 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1880 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1881 tcg_ctx.tb_ctx.nb_tbs : 0);
5b6dd868
BS
1882 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1883 direct_jmp_count,
5e5f07e0
EV
1884 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1885 tcg_ctx.tb_ctx.nb_tbs : 0,
5b6dd868 1886 direct_jmp2_count,
5e5f07e0
EV
1887 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1888 tcg_ctx.tb_ctx.nb_tbs : 0);
329844d4
EC
1889
1890 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
7266ae91 1891 print_qht_statistics(f, cpu_fprintf, hst);
329844d4
EC
1892 qht_statistics_destroy(&hst);
1893
5b6dd868 1894 cpu_fprintf(f, "\nStatistics:\n");
3359baad
SF
1895 cpu_fprintf(f, "TB flush count %u\n",
1896 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
5e5f07e0
EV
1897 cpu_fprintf(f, "TB invalidate count %d\n",
1898 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
5b6dd868
BS
1899 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1900 tcg_dump_info(f, cpu_fprintf);
1901}
1902
246ae24d
MF
1903void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1904{
1905 tcg_dump_op_count(f, cpu_fprintf);
1906}
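
/* A minimal usage sketch (hypothetical helper; presumably the real consumers
 * are the monitor "info jit" / "info opcount" commands). Plain fprintf is
 * compatible with the fprintf_function signature, so the dumps can also be
 * sent to stderr.
 */
#if 0 /* illustrative only */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
    dump_opcount_info(stderr, fprintf);
}
#endif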
1907
5b6dd868
BS
1908#else /* CONFIG_USER_ONLY */
1909
c3affe56 1910void cpu_interrupt(CPUState *cpu, int mask)
5b6dd868 1911{
259186a7 1912 cpu->interrupt_request |= mask;
378df4b2 1913 cpu->tcg_exit_req = 1;
5b6dd868
BS
1914}
1915
1916/*
1917 * Walks guest process memory "regions" one by one
1918 * and calls callback function 'fn' for each region.
1919 */
1920struct walk_memory_regions_data {
1921 walk_memory_regions_fn fn;
1922 void *priv;
1a1c4db9 1923 target_ulong start;
5b6dd868
BS
1924 int prot;
1925};
1926
1927static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1a1c4db9 1928 target_ulong end, int new_prot)
5b6dd868 1929{
1a1c4db9 1930 if (data->start != -1u) {
5b6dd868
BS
1931 int rc = data->fn(data->priv, data->start, end, data->prot);
1932 if (rc != 0) {
1933 return rc;
1934 }
1935 }
1936
1a1c4db9 1937 data->start = (new_prot ? end : -1u);
5b6dd868
BS
1938 data->prot = new_prot;
1939
1940 return 0;
1941}
1942
1943static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1a1c4db9 1944 target_ulong base, int level, void **lp)
5b6dd868 1945{
1a1c4db9 1946 target_ulong pa;
5b6dd868
BS
1947 int i, rc;
1948
1949 if (*lp == NULL) {
1950 return walk_memory_regions_end(data, base, 0);
1951 }
1952
1953 if (level == 0) {
1954 PageDesc *pd = *lp;
1955
03f49957 1956 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868
BS
1957 int prot = pd[i].flags;
1958
1959 pa = base | (i << TARGET_PAGE_BITS);
1960 if (prot != data->prot) {
1961 rc = walk_memory_regions_end(data, pa, prot);
1962 if (rc != 0) {
1963 return rc;
1964 }
1965 }
1966 }
1967 } else {
1968 void **pp = *lp;
1969
03f49957 1970 for (i = 0; i < V_L2_SIZE; ++i) {
1a1c4db9 1971 pa = base | ((target_ulong)i <<
03f49957 1972 (TARGET_PAGE_BITS + V_L2_BITS * level));
5b6dd868
BS
1973 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1974 if (rc != 0) {
1975 return rc;
1976 }
1977 }
1978 }
1979
1980 return 0;
1981}
1982
1983int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1984{
1985 struct walk_memory_regions_data data;
66ec9f49 1986 uintptr_t i, l1_sz = v_l1_size;
5b6dd868
BS
1987
1988 data.fn = fn;
1989 data.priv = priv;
1a1c4db9 1990 data.start = -1u;
5b6dd868
BS
1991 data.prot = 0;
1992
66ec9f49
VK
1993 for (i = 0; i < l1_sz; i++) {
1994 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
1995 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
5b6dd868
BS
1996 if (rc != 0) {
1997 return rc;
1998 }
1999 }
2000
2001 return walk_memory_regions_end(&data, 0, 0);
2002}
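
/* A minimal sketch of another possible callback (hypothetical helper with the
 * same walk_memory_regions_fn signature as dump_region() below): it sums the
 * sizes of all executable regions. A non-zero return aborts the walk.
 */
#if 0 /* illustrative only */
static int count_exec_bytes(void *priv, target_ulong start,
                            target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0; /* keep walking */
}
/* usage: target_ulong total = 0; walk_memory_regions(&total, count_exec_bytes); */
#endif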
2003
1a1c4db9
MI
2004static int dump_region(void *priv, target_ulong start,
2005 target_ulong end, unsigned long prot)
5b6dd868
BS
2006{
2007 FILE *f = (FILE *)priv;
2008
1a1c4db9
MI
2009 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2010 " "TARGET_FMT_lx" %c%c%c\n",
5b6dd868
BS
2011 start, end, end - start,
2012 ((prot & PAGE_READ) ? 'r' : '-'),
2013 ((prot & PAGE_WRITE) ? 'w' : '-'),
2014 ((prot & PAGE_EXEC) ? 'x' : '-'));
2015
2016 return 0;
2017}
2018
2019/* dump memory mappings */
2020void page_dump(FILE *f)
2021{
1a1c4db9 2022 const int length = sizeof(target_ulong) * 2;
227b8175
SW
2023 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2024 length, "start", length, "end", length, "size", "prot");
5b6dd868
BS
2025 walk_memory_regions(f, dump_region);
2026}
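
/* Example output for a 32-bit target (addresses are illustrative only):
 *
 *   start    end      size     prot
 *   00010000-00021000 00011000 r-x
 *   00021000-00022000 00001000 rw-
 */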
2027
2028int page_get_flags(target_ulong address)
2029{
2030 PageDesc *p;
2031
2032 p = page_find(address >> TARGET_PAGE_BITS);
2033 if (!p) {
2034 return 0;
2035 }
2036 return p->flags;
2037}
2038
2039/* Modify the flags of a page and invalidate the code if necessary.
2040 The flag PAGE_WRITE_ORG is positioned automatically depending
2041 on PAGE_WRITE. The mmap_lock should already be held. */
2042void page_set_flags(target_ulong start, target_ulong end, int flags)
2043{
2044 target_ulong addr, len;
2045
2046 /* This function should never be called with addresses outside the
2047 guest address space. If this assert fires, it probably indicates
2048 a missing call to h2g_valid. */
2049#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1a1c4db9 2050 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
5b6dd868
BS
2051#endif
2052 assert(start < end);
e505a063 2053 assert_memory_lock();
5b6dd868
BS
2054
2055 start = start & TARGET_PAGE_MASK;
2056 end = TARGET_PAGE_ALIGN(end);
2057
2058 if (flags & PAGE_WRITE) {
2059 flags |= PAGE_WRITE_ORG;
2060 }
2061
2062 for (addr = start, len = end - start;
2063 len != 0;
2064 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2065 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2066
 2067         /* If the page is being made writable and was previously
 2068            write-protected, invalidate any translated code it contains. */
2069 if (!(p->flags & PAGE_WRITE) &&
2070 (flags & PAGE_WRITE) &&
2071 p->first_tb) {
75809229 2072 tb_invalidate_phys_page(addr, 0);
5b6dd868
BS
2073 }
2074 p->flags = flags;
2075 }
2076}
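
/* A minimal usage sketch (hypothetical caller such as a user-mode mmap/loader
 * path; the mmap_lock must already be held, as noted above): register a
 * freshly mapped guest region. PAGE_WRITE_ORG is added internally when
 * PAGE_WRITE is requested.
 */
#if 0 /* illustrative only */
static void example_register_mapping(target_ulong start, target_ulong len,
                                     int prot)
{
    page_set_flags(start, start + len, prot | PAGE_VALID);
}
#endif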
2077
2078int page_check_range(target_ulong start, target_ulong len, int flags)
2079{
2080 PageDesc *p;
2081 target_ulong end;
2082 target_ulong addr;
2083
2084 /* This function should never be called with addresses outside the
2085 guest address space. If this assert fires, it probably indicates
2086 a missing call to h2g_valid. */
2087#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1a1c4db9 2088 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
5b6dd868
BS
2089#endif
2090
2091 if (len == 0) {
2092 return 0;
2093 }
2094 if (start + len - 1 < start) {
2095 /* We've wrapped around. */
2096 return -1;
2097 }
2098
 2099     /* must do this before we lose bits in the next step */
2100 end = TARGET_PAGE_ALIGN(start + len);
2101 start = start & TARGET_PAGE_MASK;
2102
2103 for (addr = start, len = end - start;
2104 len != 0;
2105 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2106 p = page_find(addr >> TARGET_PAGE_BITS);
2107 if (!p) {
2108 return -1;
2109 }
2110 if (!(p->flags & PAGE_VALID)) {
2111 return -1;
2112 }
2113
2114 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2115 return -1;
2116 }
2117 if (flags & PAGE_WRITE) {
2118 if (!(p->flags & PAGE_WRITE_ORG)) {
2119 return -1;
2120 }
 2121             /* unprotect the page if it was made read-only because it
2122 contains translated code */
2123 if (!(p->flags & PAGE_WRITE)) {
f213e72f 2124 if (!page_unprotect(addr, 0)) {
5b6dd868
BS
2125 return -1;
2126 }
2127 }
5b6dd868
BS
2128 }
2129 }
2130 return 0;
2131}
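
/* A minimal usage sketch (hypothetical access_ok-style helper): 0 means every
 * page in the range is valid and grants the requested rights, -1 otherwise.
 */
#if 0 /* illustrative only */
static bool example_guest_range_writable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}
#endif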
2132
2133/* called from signal handler: invalidate the code and unprotect the
f213e72f
PM
2134 * page. Return 0 if the fault was not handled, 1 if it was handled,
2135 * and 2 if it was handled but the caller must cause the TB to be
2136 * immediately exited. (We can only return 2 if the 'pc' argument is
2137 * non-zero.)
2138 */
2139int page_unprotect(target_ulong address, uintptr_t pc)
5b6dd868
BS
2140{
2141 unsigned int prot;
7399a337 2142 bool current_tb_invalidated;
5b6dd868
BS
2143 PageDesc *p;
2144 target_ulong host_start, host_end, addr;
2145
2146 /* Technically this isn't safe inside a signal handler. However we
2147 know this only ever happens in a synchronous SEGV handler, so in
2148 practice it seems to be ok. */
2149 mmap_lock();
2150
2151 p = page_find(address >> TARGET_PAGE_BITS);
2152 if (!p) {
2153 mmap_unlock();
2154 return 0;
2155 }
2156
2157 /* if the page was really writable, then we change its
2158 protection back to writable */
2159 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2160 host_start = address & qemu_host_page_mask;
2161 host_end = host_start + qemu_host_page_size;
2162
2163 prot = 0;
7399a337 2164 current_tb_invalidated = false;
5b6dd868
BS
2165 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2166 p = page_find(addr >> TARGET_PAGE_BITS);
2167 p->flags |= PAGE_WRITE;
2168 prot |= p->flags;
2169
2170 /* and since the content will be modified, we must invalidate
2171 the corresponding translated code. */
7399a337 2172 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
5b6dd868
BS
2173#ifdef DEBUG_TB_CHECK
2174 tb_invalidate_check(addr);
2175#endif
2176 }
2177 mprotect((void *)g2h(host_start), qemu_host_page_size,
2178 prot & PAGE_BITS);
2179
2180 mmap_unlock();
7399a337
SS
 2181     /* If the current TB was invalidated, return to the main loop */
2182 return current_tb_invalidated ? 2 : 1;
5b6dd868
BS
2183 }
2184 mmap_unlock();
2185 return 0;
2186}
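
/* A minimal sketch of how a hypothetical caller could consume the return
 * value (assumption: the real caller is the synchronous SEGV handler on the
 * user-mode fault path, as the comment above the function suggests).
 */
#if 0 /* illustrative only */
static int example_handle_write_fault(target_ulong guest_addr, uintptr_t pc)
{
    switch (page_unprotect(guest_addr, pc)) {
    case 2: /* handled, but the caller must force an exit from the current TB */
        return 2;
    case 1: /* handled: retry the faulting write */
        return 1;
    default: /* not one of our write-protected code pages */
        return 0;
    }
}
#endif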
2187#endif /* CONFIG_USER_ONLY */