/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency. This is automatic for SoftMMU-based system
 * emulation due to its single-threaded nature. In user-mode emulation,
 * access to the memory-related structures is protected by the
 * mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
__thread int have_tb_lock;

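/* Derive the geometry of the l1_map radix tree from the configured
 * address-space and target-page sizes: the size and shift of the
 * level-1 table and the number of V_L2_BITS-wide levels below it.
 */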
static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables. */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value. */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value. Return the decoded value. */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros. */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow. The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely. Thus we can test for overflow after
           encoding a row without having to check during encoding. */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc. */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block. */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag. */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */

    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1. */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run. Revisit this. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer. This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live. */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb. */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop. */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region. */
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

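/* Clamp the requested translation-buffer size (0 means "use the default")
 * to the host-specific minimum and maximum defined above.
 */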
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer. */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments. */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default. */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary. */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page. */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted. */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file. */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable. We're more likely to get
       an address near the main executable if we let the kernel
       choose the address. */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel. */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory. */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing. This time don't specify an address. */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success! Use the new buffer. */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure. Work with what we had. */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer. Free the smaller half. */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible. The guard page at the end
       will remain inaccessible with PROT_NONE. */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer. */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support. We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much. */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

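/* Initialise the QHT hash table used to look up TBs by
 * (physical PC, PC, flags); see tb_hash_func() and tb_link_page().
 */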
static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held. */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

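/* Unlink TB from the list of TBs attached to a page; the low two bits
 * of each list pointer encode which of the TB's two pages the link
 * belongs to.
 */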
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
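/* Mark, bit per byte, which parts of the page are covered by translated
 * code, so that writes which hit no TB can skip the invalidation path.
 */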
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here. In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code. All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation. */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
 * Called with tb_lock held for system-mode emulation
 */
static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_tb_locked();
    tb_invalidate_phys_range_1(start, end);
}
#else
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    assert_memory_lock();
    tb_lock();
    tb_invalidate_phys_range_1(start, end);
    tb_unlock();
}
#endif
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with tb_lock/mmap_lock held for user-mode emulation
 * Called with tb_lock held for system-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_memory_lock();
    assert_tb_locked();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap. FIXME: writes should be protected by
         * tb_lock, reads by tb_lock or RCU.
         */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

    tb_lock();
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
         * back into the cpu_exec loop. */
        return true;
    }
#endif
    tb_unlock();

    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_lock();
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    tb_unlock();
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* Called with tb_lock held. */
void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

1727#ifndef CONFIG_USER_ONLY
5b6dd868 1728/* in deterministic execution mode, instructions doing device I/Os
8d04fb55
JK
1729 * must be at the end of the TB.
1730 *
1731 * Called by softmmu_template.h, with iothread mutex not held.
1732 */
90b40a69 1733void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
5b6dd868 1734{
a47dddd7 1735#if defined(TARGET_MIPS) || defined(TARGET_SH4)
90b40a69 1736 CPUArchState *env = cpu->env_ptr;
a47dddd7 1737#endif
5b6dd868
BS
1738 TranslationBlock *tb;
1739 uint32_t n, cflags;
1740 target_ulong pc, cs_base;
89fee74a 1741 uint32_t flags;
5b6dd868 1742
a5e99826 1743 tb_lock();
5b6dd868
BS
1744 tb = tb_find_pc(retaddr);
1745 if (!tb) {
a47dddd7 1746 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
5b6dd868
BS
1747 (void *)retaddr);
1748 }
28ecfd7a 1749 n = cpu->icount_decr.u16.low + tb->icount;
74f10515 1750 cpu_restore_state_from_tb(cpu, tb, retaddr);
5b6dd868
BS
1751 /* Calculate how many instructions had been executed before the fault
1752 occurred. */
28ecfd7a 1753 n = n - cpu->icount_decr.u16.low;
5b6dd868
BS
1754 /* Generate a new TB ending on the I/O insn. */
1755 n++;
 1756 /* On MIPS and SH, a delay-slot instruction can only be restarted if
 1757 it was already the first instruction in the TB. If it is not the
 1758 first instruction in the TB, re-execute the preceding branch
 1759 instead. */
1760#if defined(TARGET_MIPS)
1761 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
c3577479 1762 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
28ecfd7a 1763 cpu->icount_decr.u16.low++;
5b6dd868
BS
1764 env->hflags &= ~MIPS_HFLAG_BMASK;
1765 }
1766#elif defined(TARGET_SH4)
1767 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1768 && n > 1) {
1769 env->pc -= 2;
28ecfd7a 1770 cpu->icount_decr.u16.low++;
5b6dd868
BS
1771 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1772 }
1773#endif
1774 /* This should never happen. */
1775 if (n > CF_COUNT_MASK) {
a47dddd7 1776 cpu_abort(cpu, "TB too big during recompile");
5b6dd868
BS
1777 }
1778
1779 cflags = n | CF_LAST_IO;
1780 pc = tb->pc;
1781 cs_base = tb->cs_base;
1782 flags = tb->flags;
1783 tb_phys_invalidate(tb, -1);
02d57ea1
SF
1784 if (tb->cflags & CF_NOCACHE) {
1785 if (tb->orig_tb) {
1786 /* Invalidate original TB if this TB was generated in
1787 * cpu_exec_nocache() */
1788 tb_phys_invalidate(tb->orig_tb, -1);
1789 }
1790 tb_free(tb);
1791 }
5b6dd868
BS
1792 /* FIXME: In theory this could raise an exception. In practice
1793 we have already translated the block once so it's probably ok. */
648f034c 1794 tb_gen_code(cpu, pc, cs_base, flags, cflags);
a5e99826 1795
5b6dd868 1796 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
a5e99826
FK
1797 * the first in the TB) then we end up generating a whole new TB and
1798 * repeating the fault, which is horribly inefficient.
1799 * Better would be to execute just this insn uncached, or generate a
1800 * second new TB.
1801 *
1802 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1803 * tb_lock gets reset.
1804 */
6886b980 1805 cpu_loop_exit_noexc(cpu);
5b6dd868
BS
1806}
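/* Illustrative walkthrough of the icount arithmetic above (assumed numbers):
 * suppose the faulting TB contains tb->icount == 5 guest insns and the I/O
 * insn is the third of them. The generated code charged all 5 insns to
 * icount_decr.u16.low at TB entry, so n = u16.low + tb->icount recovers the
 * budget the CPU had when it entered the TB. Once cpu_restore_state_from_tb()
 * has rewound the accounting to the faulting insn, n - u16.low == 2, i.e. two
 * insns completed before the I/O insn; n++ makes it 3, and the block is
 * regenerated with cflags = 3 | CF_LAST_IO so the I/O insn ends the new TB. */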
1807
611d4f99 1808void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
5b6dd868
BS
1809{
1810 unsigned int i;
1811
 1812 /* Discard jump cache entries for any TB that might overlap the
 1813 flushed page. */
1814 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
8cd70437 1815 memset(&cpu->tb_jmp_cache[i], 0,
5b6dd868
BS
1816 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1817
1818 i = tb_jmp_cache_hash_page(addr);
8cd70437 1819 memset(&cpu->tb_jmp_cache[i], 0,
5b6dd868
BS
1820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1821}
1822
7266ae91
EC
1823static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1824 struct qht_stats hst)
1825{
1826 uint32_t hgram_opts;
1827 size_t hgram_bins;
1828 char *hgram;
1829
1830 if (!hst.head_buckets) {
1831 return;
1832 }
1833 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1834 hst.used_head_buckets, hst.head_buckets,
1835 (double)hst.used_head_buckets / hst.head_buckets * 100);
1836
1837 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1838 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1839 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1840 hgram_opts |= QDIST_PR_NODECIMAL;
1841 }
1842 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1843 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1844 qdist_avg(&hst.occupancy) * 100, hgram);
1845 g_free(hgram);
1846
1847 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1848 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1849 if (hgram_bins > 10) {
1850 hgram_bins = 10;
1851 } else {
1852 hgram_bins = 0;
1853 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1854 }
1855 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1856 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1857 qdist_avg(&hst.chain), hgram);
1858 g_free(hgram);
1859}
1860
5b6dd868
BS
1861void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1862{
1863 int i, target_code_size, max_target_code_size;
1864 int direct_jmp_count, direct_jmp2_count, cross_page;
1865 TranslationBlock *tb;
329844d4 1866 struct qht_stats hst;
5b6dd868 1867
a5e99826
FK
1868 tb_lock();
1869
5b6dd868
BS
1870 target_code_size = 0;
1871 max_target_code_size = 0;
1872 cross_page = 0;
1873 direct_jmp_count = 0;
1874 direct_jmp2_count = 0;
5e5f07e0
EV
1875 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1876 tb = &tcg_ctx.tb_ctx.tbs[i];
5b6dd868
BS
1877 target_code_size += tb->size;
1878 if (tb->size > max_target_code_size) {
1879 max_target_code_size = tb->size;
1880 }
1881 if (tb->page_addr[1] != -1) {
1882 cross_page++;
1883 }
f309101c 1884 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
5b6dd868 1885 direct_jmp_count++;
f309101c 1886 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
5b6dd868
BS
1887 direct_jmp2_count++;
1888 }
1889 }
1890 }
1891 /* XXX: avoid using doubles ? */
1892 cpu_fprintf(f, "Translation buffer state:\n");
1893 cpu_fprintf(f, "gen code size %td/%zd\n",
0b0d3320 1894 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
b125f9dc 1895 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
5b6dd868 1896 cpu_fprintf(f, "TB count %d/%d\n",
5e5f07e0 1897 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
5b6dd868 1898 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
5e5f07e0
EV
1899 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1900 tcg_ctx.tb_ctx.nb_tbs : 0,
1901 max_target_code_size);
5b6dd868 1902 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
5e5f07e0
EV
1903 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1904 tcg_ctx.code_gen_buffer) /
1905 tcg_ctx.tb_ctx.nb_tbs : 0,
1906 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1907 tcg_ctx.code_gen_buffer) /
1908 target_code_size : 0);
1909 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1910 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1911 tcg_ctx.tb_ctx.nb_tbs : 0);
5b6dd868
BS
1912 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1913 direct_jmp_count,
5e5f07e0
EV
1914 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1915 tcg_ctx.tb_ctx.nb_tbs : 0,
5b6dd868 1916 direct_jmp2_count,
5e5f07e0
EV
1917 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1918 tcg_ctx.tb_ctx.nb_tbs : 0);
329844d4
EC
1919
1920 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
7266ae91 1921 print_qht_statistics(f, cpu_fprintf, hst);
329844d4
EC
1922 qht_statistics_destroy(&hst);
1923
5b6dd868 1924 cpu_fprintf(f, "\nStatistics:\n");
3359baad
SF
1925 cpu_fprintf(f, "TB flush count %u\n",
1926 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
5e5f07e0
EV
1927 cpu_fprintf(f, "TB invalidate count %d\n",
1928 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
5b6dd868
BS
1929 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1930 tcg_dump_info(f, cpu_fprintf);
a5e99826
FK
1931
1932 tb_unlock();
5b6dd868
BS
1933}
1934
246ae24d
MF
1935void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1936{
1937 tcg_dump_op_count(f, cpu_fprintf);
1938}
1939
5b6dd868
BS
1940#else /* CONFIG_USER_ONLY */
1941
c3affe56 1942void cpu_interrupt(CPUState *cpu, int mask)
5b6dd868 1943{
8d04fb55 1944 g_assert(qemu_mutex_iothread_locked());
259186a7 1945 cpu->interrupt_request |= mask;
1aab16c2 1946 cpu->icount_decr.u16.high = -1;
5b6dd868
BS
1947}
1948
1949/*
1950 * Walks guest process memory "regions" one by one
 1951 * and calls the callback function 'fn' for each region.
1952 */
1953struct walk_memory_regions_data {
1954 walk_memory_regions_fn fn;
1955 void *priv;
1a1c4db9 1956 target_ulong start;
5b6dd868
BS
1957 int prot;
1958};
1959
1960static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1a1c4db9 1961 target_ulong end, int new_prot)
5b6dd868 1962{
1a1c4db9 1963 if (data->start != -1u) {
5b6dd868
BS
1964 int rc = data->fn(data->priv, data->start, end, data->prot);
1965 if (rc != 0) {
1966 return rc;
1967 }
1968 }
1969
1a1c4db9 1970 data->start = (new_prot ? end : -1u);
5b6dd868
BS
1971 data->prot = new_prot;
1972
1973 return 0;
1974}
1975
1976static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1a1c4db9 1977 target_ulong base, int level, void **lp)
5b6dd868 1978{
1a1c4db9 1979 target_ulong pa;
5b6dd868
BS
1980 int i, rc;
1981
1982 if (*lp == NULL) {
1983 return walk_memory_regions_end(data, base, 0);
1984 }
1985
1986 if (level == 0) {
1987 PageDesc *pd = *lp;
1988
03f49957 1989 for (i = 0; i < V_L2_SIZE; ++i) {
5b6dd868
BS
1990 int prot = pd[i].flags;
1991
1992 pa = base | (i << TARGET_PAGE_BITS);
1993 if (prot != data->prot) {
1994 rc = walk_memory_regions_end(data, pa, prot);
1995 if (rc != 0) {
1996 return rc;
1997 }
1998 }
1999 }
2000 } else {
2001 void **pp = *lp;
2002
03f49957 2003 for (i = 0; i < V_L2_SIZE; ++i) {
1a1c4db9 2004 pa = base | ((target_ulong)i <<
03f49957 2005 (TARGET_PAGE_BITS + V_L2_BITS * level));
5b6dd868
BS
2006 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2007 if (rc != 0) {
2008 return rc;
2009 }
2010 }
2011 }
2012
2013 return 0;
2014}
2015
2016int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2017{
2018 struct walk_memory_regions_data data;
66ec9f49 2019 uintptr_t i, l1_sz = v_l1_size;
5b6dd868
BS
2020
2021 data.fn = fn;
2022 data.priv = priv;
1a1c4db9 2023 data.start = -1u;
5b6dd868
BS
2024 data.prot = 0;
2025
66ec9f49
VK
2026 for (i = 0; i < l1_sz; i++) {
2027 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2028 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
5b6dd868
BS
2029 if (rc != 0) {
2030 return rc;
2031 }
2032 }
2033
2034 return walk_memory_regions_end(&data, 0, 0);
2035}
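/* Illustrative sketch, not in the original source: a second
 * walk_memory_regions_fn callback, alongside dump_region() below, that sums
 * the size of every executable region. The helper name is made up. */
#if 0
static int example_count_exec_bytes(void *priv, target_ulong start,
                                    target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0; /* returning non-zero would abort the walk early */
}

/* Usage: target_ulong total = 0;
 *        walk_memory_regions(&total, example_count_exec_bytes); */
#endif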
2036
1a1c4db9
MI
2037static int dump_region(void *priv, target_ulong start,
2038 target_ulong end, unsigned long prot)
5b6dd868
BS
2039{
2040 FILE *f = (FILE *)priv;
2041
1a1c4db9
MI
2042 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2043 " "TARGET_FMT_lx" %c%c%c\n",
5b6dd868
BS
2044 start, end, end - start,
2045 ((prot & PAGE_READ) ? 'r' : '-'),
2046 ((prot & PAGE_WRITE) ? 'w' : '-'),
2047 ((prot & PAGE_EXEC) ? 'x' : '-'));
2048
2049 return 0;
2050}
2051
2052/* dump memory mappings */
2053void page_dump(FILE *f)
2054{
1a1c4db9 2055 const int length = sizeof(target_ulong) * 2;
227b8175
SW
2056 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2057 length, "start", length, "end", length, "size", "prot");
5b6dd868
BS
2058 walk_memory_regions(f, dump_region);
2059}
2060
2061int page_get_flags(target_ulong address)
2062{
2063 PageDesc *p;
2064
2065 p = page_find(address >> TARGET_PAGE_BITS);
2066 if (!p) {
2067 return 0;
2068 }
2069 return p->flags;
2070}
2071
2072/* Modify the flags of a page and invalidate the code if necessary.
 2073 The flag PAGE_WRITE_ORG is set automatically depending
 2074 on PAGE_WRITE. The mmap_lock should already be held. */
2075void page_set_flags(target_ulong start, target_ulong end, int flags)
2076{
2077 target_ulong addr, len;
2078
2079 /* This function should never be called with addresses outside the
2080 guest address space. If this assert fires, it probably indicates
2081 a missing call to h2g_valid. */
2082#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1a1c4db9 2083 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
5b6dd868
BS
2084#endif
2085 assert(start < end);
e505a063 2086 assert_memory_lock();
5b6dd868
BS
2087
2088 start = start & TARGET_PAGE_MASK;
2089 end = TARGET_PAGE_ALIGN(end);
2090
2091 if (flags & PAGE_WRITE) {
2092 flags |= PAGE_WRITE_ORG;
2093 }
2094
2095 for (addr = start, len = end - start;
2096 len != 0;
2097 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2098 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2099
 2100 /* If the page was write-protected and is now being made writable,
 2101 invalidate any translated code it contains. */
2102 if (!(p->flags & PAGE_WRITE) &&
2103 (flags & PAGE_WRITE) &&
2104 p->first_tb) {
75809229 2105 tb_invalidate_phys_page(addr, 0);
5b6dd868
BS
2106 }
2107 p->flags = flags;
2108 }
2109}
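/* Illustrative sketch (assumption): how a user-mode mmap emulation might
 * register a fresh guest mapping. The helper name and flag combination are
 * made up for the example; PAGE_WRITE_ORG is added by page_set_flags()
 * itself because PAGE_WRITE is set. */
#if 0
static void example_register_guest_mapping(target_ulong start, target_ulong len)
{
    mmap_lock();
    page_set_flags(start, start + len, PAGE_VALID | PAGE_READ | PAGE_WRITE);
    mmap_unlock();
}
#endif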
2110
2111int page_check_range(target_ulong start, target_ulong len, int flags)
2112{
2113 PageDesc *p;
2114 target_ulong end;
2115 target_ulong addr;
2116
2117 /* This function should never be called with addresses outside the
2118 guest address space. If this assert fires, it probably indicates
2119 a missing call to h2g_valid. */
2120#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1a1c4db9 2121 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
5b6dd868
BS
2122#endif
2123
2124 if (len == 0) {
2125 return 0;
2126 }
2127 if (start + len - 1 < start) {
2128 /* We've wrapped around. */
2129 return -1;
2130 }
2131
 2132 /* must do this before we lose bits in the next step */
2133 end = TARGET_PAGE_ALIGN(start + len);
2134 start = start & TARGET_PAGE_MASK;
2135
2136 for (addr = start, len = end - start;
2137 len != 0;
2138 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2139 p = page_find(addr >> TARGET_PAGE_BITS);
2140 if (!p) {
2141 return -1;
2142 }
2143 if (!(p->flags & PAGE_VALID)) {
2144 return -1;
2145 }
2146
2147 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2148 return -1;
2149 }
2150 if (flags & PAGE_WRITE) {
2151 if (!(p->flags & PAGE_WRITE_ORG)) {
2152 return -1;
2153 }
 2154 /* unprotect the page if it was made read-only because it
 2155 contains translated code */
2156 if (!(p->flags & PAGE_WRITE)) {
f213e72f 2157 if (!page_unprotect(addr, 0)) {
5b6dd868
BS
2158 return -1;
2159 }
2160 }
5b6dd868
BS
2161 }
2162 }
2163 return 0;
2164}
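/* Illustrative sketch (assumption): validating a guest buffer before reading
 * it on behalf of an emulated syscall; page_check_range() returns 0 when the
 * whole range carries the requested permissions. The helper name is made up. */
#if 0
static bool example_guest_buffer_readable(target_ulong guest_addr, target_ulong len)
{
    return page_check_range(guest_addr, len, PAGE_READ) == 0;
}
#endif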
2165
2166/* called from signal handler: invalidate the code and unprotect the
f213e72f
PM
2167 * page. Return 0 if the fault was not handled, 1 if it was handled,
2168 * and 2 if it was handled but the caller must cause the TB to be
2169 * immediately exited. (We can only return 2 if the 'pc' argument is
2170 * non-zero.)
2171 */
2172int page_unprotect(target_ulong address, uintptr_t pc)
5b6dd868
BS
2173{
2174 unsigned int prot;
7399a337 2175 bool current_tb_invalidated;
5b6dd868
BS
2176 PageDesc *p;
2177 target_ulong host_start, host_end, addr;
2178
2179 /* Technically this isn't safe inside a signal handler. However we
2180 know this only ever happens in a synchronous SEGV handler, so in
2181 practice it seems to be ok. */
2182 mmap_lock();
2183
2184 p = page_find(address >> TARGET_PAGE_BITS);
2185 if (!p) {
2186 mmap_unlock();
2187 return 0;
2188 }
2189
2190 /* if the page was really writable, then we change its
2191 protection back to writable */
2192 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2193 host_start = address & qemu_host_page_mask;
2194 host_end = host_start + qemu_host_page_size;
2195
2196 prot = 0;
7399a337 2197 current_tb_invalidated = false;
5b6dd868
BS
2198 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2199 p = page_find(addr >> TARGET_PAGE_BITS);
2200 p->flags |= PAGE_WRITE;
2201 prot |= p->flags;
2202
2203 /* and since the content will be modified, we must invalidate
2204 the corresponding translated code. */
7399a337 2205 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
5b6dd868
BS
2206#ifdef DEBUG_TB_CHECK
2207 tb_invalidate_check(addr);
2208#endif
2209 }
2210 mprotect((void *)g2h(host_start), qemu_host_page_size,
2211 prot & PAGE_BITS);
2212
2213 mmap_unlock();
7399a337
SS
 2214 /* If the current TB was invalidated, return to the main loop */
2215 return current_tb_invalidated ? 2 : 1;
5b6dd868
BS
2216 }
2217 mmap_unlock();
2218 return 0;
2219}
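/* Illustrative sketch (assumption): how a synchronous SEGV handler might act
 * on page_unprotect()'s three-way return value. The wrapper name is made up;
 * cpu_loop_exit_noexc() is the real way to abandon the current TB. */
#if 0
static bool example_handle_write_fault(CPUState *cpu, target_ulong guest_addr,
                                       uintptr_t fault_host_pc)
{
    switch (page_unprotect(guest_addr, fault_host_pc)) {
    case 0:
        return false; /* not a write-protected code page; a genuine fault */
    case 1:
        return true;  /* page made writable again; just restart the insn */
    case 2:
        cpu_loop_exit_noexc(cpu); /* current TB was invalidated; leave it now */
    }
    return true;
}
#endif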
2220#endif /* CONFIG_USER_ONLY */