1 /*
2 * Host code generation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
23
24
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #include "exec/exec-all.h"
35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36 #include <sys/param.h>
37 #if __FreeBSD_version >= 700104
38 #define HAVE_KINFO_GETVMMAP
39 #define sigqueue sigqueue_freebsd /* avoid redefinition */
40 #include <sys/proc.h>
41 #include <machine/profile.h>
42 #define _KERNEL
43 #include <sys/user.h>
44 #undef _KERNEL
45 #undef sigqueue
46 #include <libutil.h>
47 #endif
48 #endif
49 #else
50 #include "exec/address-spaces.h"
51 #endif
52
53 #include "exec/cputlb.h"
54 #include "exec/tb-hash.h"
55 #include "translate-all.h"
56 #include "qemu/bitmap.h"
57 #include "qemu/timer.h"
58 #include "exec/log.h"
59
60 /* #define DEBUG_TB_INVALIDATE */
61 /* #define DEBUG_TB_FLUSH */
62 /* #define DEBUG_LOCKING */
63 /* make various TB consistency checks */
64 /* #define DEBUG_TB_CHECK */
65
66 #if !defined(CONFIG_USER_ONLY)
67 /* TB consistency checks only implemented for usermode emulation. */
68 #undef DEBUG_TB_CHECK
69 #endif
70
71 /* Access to the various translation structures needs to be serialised
72 * via locks for consistency.  This is automatic for SoftMMU-based system
73 * emulation due to its single-threaded nature.  In user-mode emulation,
74 * access to the memory-related structures is protected by the
75 * mmap_lock.
76 */
77 #ifdef DEBUG_LOCKING
78 #define DEBUG_MEM_LOCKS 1
79 #else
80 #define DEBUG_MEM_LOCKS 0
81 #endif
82
83 #ifdef CONFIG_SOFTMMU
84 #define assert_memory_lock() do { /* nothing */ } while (0)
85 #else
86 #define assert_memory_lock() do { \
87 if (DEBUG_MEM_LOCKS) { \
88 g_assert(have_mmap_lock()); \
89 } \
90 } while (0)
91 #endif
92
93 #define SMC_BITMAP_USE_THRESHOLD 10
94
95 typedef struct PageDesc {
96 /* list of TBs intersecting this ram page */
97 TranslationBlock *first_tb;
98 #ifdef CONFIG_SOFTMMU
99 /* in order to optimize self modifying code, we count the number
100 of lookups we do to a given page to use a bitmap */
101 unsigned int code_write_count;
102 unsigned long *code_bitmap;
103 #else
104 unsigned long flags;
105 #endif
106 } PageDesc;
107
108 /* In system mode we want L1_MAP to be based on ram offsets,
109 while in user mode we want it to be based on virtual addresses. */
110 #if !defined(CONFIG_USER_ONLY)
111 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
112 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
113 #else
114 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
115 #endif
116 #else
117 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
118 #endif
119
120 /* Size of the L2 (and L3, etc) page tables. */
121 #define V_L2_BITS 10
122 #define V_L2_SIZE (1 << V_L2_BITS)
123
124 uintptr_t qemu_host_page_size;
125 intptr_t qemu_host_page_mask;
126
127 /*
128 * L1 Mapping properties
129 */
130 static int v_l1_size;
131 static int v_l1_shift;
132 static int v_l2_levels;
133
134 /* The bottom level has pointers to PageDesc, and is indexed by
135 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
136 */
137 #define V_L1_MIN_BITS 4
138 #define V_L1_MAX_BITS (V_L2_BITS + 3)
139 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
140
141 static void *l1_map[V_L1_MAX_SIZE];
142
143 /* code generation context */
144 TCGContext tcg_ctx;
145 bool parallel_cpus;
146
147 /* translation block context */
148 #ifdef CONFIG_USER_ONLY
149 __thread int have_tb_lock;
150 #endif
151
152 static void page_table_config_init(void)
153 {
154 uint32_t v_l1_bits;
155
156 assert(TARGET_PAGE_BITS);
157 /* The bits remaining after N lower levels of page tables. */
158 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
159 if (v_l1_bits < V_L1_MIN_BITS) {
160 v_l1_bits += V_L2_BITS;
161 }
162
163 v_l1_size = 1 << v_l1_bits;
164 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
165 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
166
167 assert(v_l1_bits <= V_L1_MAX_BITS);
168 assert(v_l1_shift % V_L2_BITS == 0);
169 assert(v_l2_levels >= 0);
170 }
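/* Worked example (illustrative only, not part of the upstream file):
 * assuming L1_MAP_ADDR_SPACE_BITS == 47 and TARGET_PAGE_BITS == 12,
 * the code above computes
 *     (47 - 12) % 10 == 5        -> v_l1_bits  = 5  (>= V_L1_MIN_BITS)
 *     v_l1_size   = 1 << 5  == 32
 *     v_l1_shift  = 47 - 12 - 5 == 30
 *     v_l2_levels = 30 / 10 - 1 == 2
 * i.e. a 32-entry L1 table, two intermediate 1024-entry levels, and a
 * final 1024-entry level of PageDesc: 5 + 10 + 10 + 10 + 12 == 47 bits.
 */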
171
172 void tb_lock(void)
173 {
174 #ifdef CONFIG_USER_ONLY
175 assert(!have_tb_lock);
176 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
177 have_tb_lock++;
178 #endif
179 }
180
181 void tb_unlock(void)
182 {
183 #ifdef CONFIG_USER_ONLY
184 assert(have_tb_lock);
185 have_tb_lock--;
186 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
187 #endif
188 }
189
190 void tb_lock_reset(void)
191 {
192 #ifdef CONFIG_USER_ONLY
193 if (have_tb_lock) {
194 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
195 have_tb_lock = 0;
196 }
197 #endif
198 }
199
200 #ifdef DEBUG_LOCKING
201 #define DEBUG_TB_LOCKS 1
202 #else
203 #define DEBUG_TB_LOCKS 0
204 #endif
205
206 #ifdef CONFIG_SOFTMMU
207 #define assert_tb_lock() do { /* nothing */ } while (0)
208 #else
209 #define assert_tb_lock() do { \
210 if (DEBUG_TB_LOCKS) { \
211 g_assert(have_tb_lock); \
212 } \
213 } while (0)
214 #endif
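/* Usage sketch (illustrative only, mirroring how the annotations above are
 * used later in this file): a helper that touches the translation
 * structures documents its locking requirement by calling the matching
 * assert on entry; the checks only fire in user-mode builds with
 * DEBUG_LOCKING defined.  The helper name below is hypothetical.
 */
#if 0
static void example_tb_walker(void)
{
    assert_tb_lock();      /* caller must hold tb_lock */
    assert_memory_lock();  /* caller must hold mmap_lock */
    /* ... safe to walk tcg_ctx.tb_ctx and the page tables here ... */
}
#endif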
215
216
217 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
218
219 void cpu_gen_init(void)
220 {
221 tcg_context_init(&tcg_ctx);
222 }
223
224 /* Encode VAL as a signed leb128 sequence at P.
225 Return P incremented past the encoded value. */
226 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
227 {
228 int more, byte;
229
230 do {
231 byte = val & 0x7f;
232 val >>= 7;
233 more = !((val == 0 && (byte & 0x40) == 0)
234 || (val == -1 && (byte & 0x40) != 0));
235 if (more) {
236 byte |= 0x80;
237 }
238 *p++ = byte;
239 } while (more);
240
241 return p;
242 }
243
244 /* Decode a signed leb128 sequence at *PP; increment *PP past the
245 decoded value. Return the decoded value. */
246 static target_long decode_sleb128(uint8_t **pp)
247 {
248 uint8_t *p = *pp;
249 target_long val = 0;
250 int byte, shift = 0;
251
252 do {
253 byte = *p++;
254 val |= (target_ulong)(byte & 0x7f) << shift;
255 shift += 7;
256 } while (byte & 0x80);
257 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
258 val |= -(target_ulong)1 << shift;
259 }
260
261 *pp = p;
262 return val;
263 }
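/* Worked example (illustrative): encode_sleb128() emits 7 payload bits per
 * byte, least significant group first, with bit 7 as a continuation flag
 * and bit 6 of the last byte as the sign.  For instance:
 *      300  ->  0xac 0x02      (0x2c | 0x80, then 0x02)
 *     -123  ->  0x85 0x7f      (0x05 | 0x80, then 0x7f; bit 6 set -> negative)
 * decode_sleb128() reverses this, sign-extending from bit 6 of the final
 * byte when the value is negative.
 */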
264
265 /* Encode the data collected about the instructions while compiling TB.
266 Place the data at BLOCK, and return the number of bytes consumed.
267
268 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
269 which come from the target's insn_start data, followed by a uintptr_t
270 which comes from the host pc of the end of the code implementing the insn.
271
272 Each line of the table is encoded as sleb128 deltas from the previous
273 line. The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
274 That is, the first column is seeded with the guest pc, the last column
275 with the host pc, and the middle columns with zeros. */
276
277 static int encode_search(TranslationBlock *tb, uint8_t *block)
278 {
279 uint8_t *highwater = tcg_ctx.code_gen_highwater;
280 uint8_t *p = block;
281 int i, j, n;
282
283 tb->tc_search = block;
284
285 for (i = 0, n = tb->icount; i < n; ++i) {
286 target_ulong prev;
287
288 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
289 if (i == 0) {
290 prev = (j == 0 ? tb->pc : 0);
291 } else {
292 prev = tcg_ctx.gen_insn_data[i - 1][j];
293 }
294 p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
295 }
296 prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
297 p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
298
299 /* Test for (pending) buffer overflow. The assumption is that any
300 one row beginning below the high water mark cannot overrun
301 the buffer completely. Thus we can test for overflow after
302 encoding a row without having to check during encoding. */
303 if (unlikely(p > highwater)) {
304 return -1;
305 }
306 }
307
308 return p - block;
309 }
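/* Worked example (illustrative, assuming TARGET_INSN_START_WORDS == 1):
 * for a TB at guest pc 0x1000 whose three insns start at guest pc
 * 0x1000/0x1004/0x1008 and whose generated code for each insn ends at host
 * offsets 0x20/0x48/0x60, each row is encoded as sleb128 deltas from the
 * previous row, seeded with { tb->pc, 0 }:
 *     row 0:  sleb(0x1000 - 0x1000) = 0x00,   sleb(0x20 - 0x00) = 0x20
 *     row 1:  sleb(0x1004 - 0x1000) = 0x04,   sleb(0x48 - 0x20) = 0x28
 *     row 2:  sleb(0x1008 - 0x1004) = 0x04,   sleb(0x60 - 0x48) = 0x18
 * cpu_restore_state_from_tb() below walks the same deltas forward until
 * the accumulated host pc passes searched_pc.
 */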
310
311 /* The cpu state corresponding to 'searched_pc' is restored.
312 * Called with tb_lock held.
313 */
314 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
315 uintptr_t searched_pc)
316 {
317 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
318 uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
319 CPUArchState *env = cpu->env_ptr;
320 uint8_t *p = tb->tc_search;
321 int i, j, num_insns = tb->icount;
322 #ifdef CONFIG_PROFILER
323 int64_t ti = profile_getclock();
324 #endif
325
326 searched_pc -= GETPC_ADJ;
327
328 if (searched_pc < host_pc) {
329 return -1;
330 }
331
332 /* Reconstruct the stored insn data while looking for the point at
333 which the end of the insn exceeds the searched_pc. */
334 for (i = 0; i < num_insns; ++i) {
335 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
336 data[j] += decode_sleb128(&p);
337 }
338 host_pc += decode_sleb128(&p);
339 if (host_pc > searched_pc) {
340 goto found;
341 }
342 }
343 return -1;
344
345 found:
346 if (tb->cflags & CF_USE_ICOUNT) {
347 assert(use_icount);
348 /* Reset the cycle counter to the start of the block. */
349 cpu->icount_decr.u16.low += num_insns;
350 /* Clear the IO flag. */
351 cpu->can_do_io = 0;
352 }
353 cpu->icount_decr.u16.low -= i;
354 restore_state_to_opc(env, tb, data);
355
356 #ifdef CONFIG_PROFILER
357 tcg_ctx.restore_time += profile_getclock() - ti;
358 tcg_ctx.restore_count++;
359 #endif
360 return 0;
361 }
362
363 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
364 {
365 TranslationBlock *tb;
366
367 tb = tb_find_pc(retaddr);
368 if (tb) {
369 cpu_restore_state_from_tb(cpu, tb, retaddr);
370 if (tb->cflags & CF_NOCACHE) {
371 /* one-shot translation, invalidate it immediately */
372 tb_phys_invalidate(tb, -1);
373 tb_free(tb);
374 }
375 return true;
376 }
377 return false;
378 }
379
380 void page_size_init(void)
381 {
382 /* NOTE: we can always assume that qemu_host_page_size >=
383 TARGET_PAGE_SIZE */
384 qemu_real_host_page_size = getpagesize();
385 qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
386 if (qemu_host_page_size == 0) {
387 qemu_host_page_size = qemu_real_host_page_size;
388 }
389 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
390 qemu_host_page_size = TARGET_PAGE_SIZE;
391 }
392 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
393 }
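/* Example (illustrative): with a 4 KiB page size, -(intptr_t)4096 is ~0xfff,
 * so AND-ing an address with either mask computed above rounds it down to a
 * page boundary (see e.g. do_protect() below, which applies
 * qemu_real_host_page_mask this way).
 */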
394
395 static void page_init(void)
396 {
397 page_size_init();
398 page_table_config_init();
399
400 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
401 {
402 #ifdef HAVE_KINFO_GETVMMAP
403 struct kinfo_vmentry *freep;
404 int i, cnt;
405
406 freep = kinfo_getvmmap(getpid(), &cnt);
407 if (freep) {
408 mmap_lock();
409 for (i = 0; i < cnt; i++) {
410 unsigned long startaddr, endaddr;
411
412 startaddr = freep[i].kve_start;
413 endaddr = freep[i].kve_end;
414 if (h2g_valid(startaddr)) {
415 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
416
417 if (h2g_valid(endaddr)) {
418 endaddr = h2g(endaddr);
419 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
420 } else {
421 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
422 endaddr = ~0ul;
423 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
424 #endif
425 }
426 }
427 }
428 free(freep);
429 mmap_unlock();
430 }
431 #else
432 FILE *f;
433
434 last_brk = (unsigned long)sbrk(0);
435
436 f = fopen("/compat/linux/proc/self/maps", "r");
437 if (f) {
438 mmap_lock();
439
440 do {
441 unsigned long startaddr, endaddr;
442 int n;
443
444 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
445
446 if (n == 2 && h2g_valid(startaddr)) {
447 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
448
449 if (h2g_valid(endaddr)) {
450 endaddr = h2g(endaddr);
451 } else {
452 endaddr = ~0ul;
453 }
454 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
455 }
456 } while (!feof(f));
457
458 fclose(f);
459 mmap_unlock();
460 }
461 #endif
462 }
463 #endif
464 }
465
466 /* If alloc=1:
467 * Called with tb_lock held for system emulation.
468 * Called with mmap_lock held for user-mode emulation.
469 */
470 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
471 {
472 PageDesc *pd;
473 void **lp;
474 int i;
475
476 if (alloc) {
477 assert_memory_lock();
478 }
479
480 /* Level 1. Always allocated. */
481 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
482
483 /* Level 2..N-1. */
484 for (i = v_l2_levels; i > 0; i--) {
485 void **p = atomic_rcu_read(lp);
486
487 if (p == NULL) {
488 if (!alloc) {
489 return NULL;
490 }
491 p = g_new0(void *, V_L2_SIZE);
492 atomic_rcu_set(lp, p);
493 }
494
495 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
496 }
497
498 pd = atomic_rcu_read(lp);
499 if (pd == NULL) {
500 if (!alloc) {
501 return NULL;
502 }
503 pd = g_new0(PageDesc, V_L2_SIZE);
504 atomic_rcu_set(lp, pd);
505 }
506
507 return pd + (index & (V_L2_SIZE - 1));
508 }
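/* Worked example (illustrative): with v_l1_size == 32, v_l1_shift == 30 and
 * v_l2_levels == 2 (cf. the page_table_config_init() example above), a page
 * index is split by the code above as
 *     L1 slot   = (index >> 30) & 31
 *     level 2   = (index >> 20) & 1023
 *     level 1   = (index >> 10) & 1023
 *     PageDesc  =  index        & 1023
 * Intermediate tables are allocated lazily and published with
 * atomic_rcu_set(), so lock-free readers only ever see fully initialised
 * nodes.
 */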
509
510 static inline PageDesc *page_find(tb_page_addr_t index)
511 {
512 return page_find_alloc(index, 0);
513 }
514
515 #if defined(CONFIG_USER_ONLY)
516 /* Currently it is not recommended to allocate big chunks of data in
517 user mode.  It will change when a dedicated libc is used. */
518 /* ??? 64-bit hosts ought to have no problem mmapping data outside the
519 region in which the guest needs to run.  Revisit this.  */
520 #define USE_STATIC_CODE_GEN_BUFFER
521 #endif
522
523 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
524 but not so small that we can't have a fair number of TBs live. */
525 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
526
527 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
528 indicated, this is constrained by the range of direct branches on the
529 host cpu, as used by the TCG implementation of goto_tb. */
530 #if defined(__x86_64__)
531 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
532 #elif defined(__sparc__)
533 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
534 #elif defined(__powerpc64__)
535 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
536 #elif defined(__powerpc__)
537 # define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
538 #elif defined(__aarch64__)
539 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
540 #elif defined(__arm__)
541 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
542 #elif defined(__s390x__)
543 /* We have a +- 4GB range on the branches; leave some slop. */
544 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
545 #elif defined(__mips__)
546 /* We have a 256MB branch region, but leave room to make sure the
547 main executable is also within that region. */
548 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
549 #else
550 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
551 #endif
552
553 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
554
555 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
556 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
557 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
558
559 static inline size_t size_code_gen_buffer(size_t tb_size)
560 {
561 /* Size the buffer. */
562 if (tb_size == 0) {
563 #ifdef USE_STATIC_CODE_GEN_BUFFER
564 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
565 #else
566 /* ??? Needs adjustments. */
567 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
568 static buffer, we could size this on RESERVED_VA, on the text
569 segment size of the executable, or continue to use the default. */
570 tb_size = (unsigned long)(ram_size / 4);
571 #endif
572 }
573 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
574 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
575 }
576 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
577 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
578 }
579 return tb_size;
580 }
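/* Example (illustrative): tb_size == 0 selects the default size
 * (DEFAULT_CODE_GEN_BUFFER_SIZE, or ram_size / 4 when the static buffer is
 * not used); a 512 KiB request is raised to MIN_CODE_GEN_BUFFER_SIZE (1 MiB);
 * a 4 GiB request on x86_64 is clamped to MAX_CODE_GEN_BUFFER_SIZE (2 GiB).
 */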
581
582 #ifdef __mips__
583 /* In order to use J and JAL within the code_gen_buffer, we require
584 that the buffer not cross a 256MB boundary. */
585 static inline bool cross_256mb(void *addr, size_t size)
586 {
587 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
588 }
589
590 /* We weren't able to allocate a buffer without crossing that boundary,
591 so make do with the larger portion of the buffer that doesn't cross.
592 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
593 static inline void *split_cross_256mb(void *buf1, size_t size1)
594 {
595 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
596 size_t size2 = buf1 + size1 - buf2;
597
598 size1 = buf2 - buf1;
599 if (size1 < size2) {
600 size1 = size2;
601 buf1 = buf2;
602 }
603
604 tcg_ctx.code_gen_buffer_size = size1;
605 return buf1;
606 }
607 #endif
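/* Worked example (illustrative): for buf == 0x0ff00000 and
 * size == 0x02000000 (32 MiB), buf and buf + size lie in different 256 MiB
 * regions, so cross_256mb() reports a crossing.  split_cross_256mb() then
 * splits at 0x10000000 into a 1 MiB piece below and a 31 MiB piece above,
 * and keeps the larger (upper) piece.
 */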
608
609 #ifdef USE_STATIC_CODE_GEN_BUFFER
610 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
611 __attribute__((aligned(CODE_GEN_ALIGN)));
612
613 # ifdef _WIN32
614 static inline void do_protect(void *addr, long size, int prot)
615 {
616 DWORD old_protect;
617 VirtualProtect(addr, size, prot, &old_protect);
618 }
619
620 static inline void map_exec(void *addr, long size)
621 {
622 do_protect(addr, size, PAGE_EXECUTE_READWRITE);
623 }
624
625 static inline void map_none(void *addr, long size)
626 {
627 do_protect(addr, size, PAGE_NOACCESS);
628 }
629 # else
630 static inline void do_protect(void *addr, long size, int prot)
631 {
632 uintptr_t start, end;
633
634 start = (uintptr_t)addr;
635 start &= qemu_real_host_page_mask;
636
637 end = (uintptr_t)addr + size;
638 end = ROUND_UP(end, qemu_real_host_page_size);
639
640 mprotect((void *)start, end - start, prot);
641 }
642
643 static inline void map_exec(void *addr, long size)
644 {
645 do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
646 }
647
648 static inline void map_none(void *addr, long size)
649 {
650 do_protect(addr, size, PROT_NONE);
651 }
652 # endif /* WIN32 */
653
654 static inline void *alloc_code_gen_buffer(void)
655 {
656 void *buf = static_code_gen_buffer;
657 size_t full_size, size;
658
659 /* The size of the buffer, rounded down to end on a page boundary. */
660 full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
661 & qemu_real_host_page_mask) - (uintptr_t)buf;
662
663 /* Reserve a guard page. */
664 size = full_size - qemu_real_host_page_size;
665
666 /* Honor a command-line option limiting the size of the buffer. */
667 if (size > tcg_ctx.code_gen_buffer_size) {
668 size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
669 & qemu_real_host_page_mask) - (uintptr_t)buf;
670 }
671 tcg_ctx.code_gen_buffer_size = size;
672
673 #ifdef __mips__
674 if (cross_256mb(buf, size)) {
675 buf = split_cross_256mb(buf, size);
676 size = tcg_ctx.code_gen_buffer_size;
677 }
678 #endif
679
680 map_exec(buf, size);
681 map_none(buf + size, qemu_real_host_page_size);
682 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
683
684 return buf;
685 }
686 #elif defined(_WIN32)
687 static inline void *alloc_code_gen_buffer(void)
688 {
689 size_t size = tcg_ctx.code_gen_buffer_size;
690 void *buf1, *buf2;
691
692 /* Perform the allocation in two steps, so that the guard page
693 is reserved but uncommitted. */
694 buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
695 MEM_RESERVE, PAGE_NOACCESS);
696 if (buf1 != NULL) {
697 buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
698 assert(buf1 == buf2);
699 }
700
701 return buf1;
702 }
703 #else
704 static inline void *alloc_code_gen_buffer(void)
705 {
706 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
707 uintptr_t start = 0;
708 size_t size = tcg_ctx.code_gen_buffer_size;
709 void *buf;
710
711 /* Constrain the position of the buffer based on the host cpu.
712 Note that these addresses are chosen in concert with the
713 addresses assigned in the relevant linker script file. */
714 # if defined(__PIE__) || defined(__PIC__)
715 /* Don't bother setting a preferred location if we're building
716 a position-independent executable. We're more likely to get
717 an address near the main executable if we let the kernel
718 choose the address. */
719 # elif defined(__x86_64__) && defined(MAP_32BIT)
720 /* Force the memory down into low memory with the executable.
721 Leave the choice of exact location with the kernel. */
722 flags |= MAP_32BIT;
723 /* Cannot expect to map more than 800MB in low memory. */
724 if (size > 800u * 1024 * 1024) {
725 tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
726 }
727 # elif defined(__sparc__)
728 start = 0x40000000ul;
729 # elif defined(__s390x__)
730 start = 0x90000000ul;
731 # elif defined(__mips__)
732 # if _MIPS_SIM == _ABI64
733 start = 0x128000000ul;
734 # else
735 start = 0x08000000ul;
736 # endif
737 # endif
738
739 buf = mmap((void *)start, size + qemu_real_host_page_size,
740 PROT_NONE, flags, -1, 0);
741 if (buf == MAP_FAILED) {
742 return NULL;
743 }
744
745 #ifdef __mips__
746 if (cross_256mb(buf, size)) {
747 /* Try again, with the original still mapped, to avoid re-acquiring
748 that 256mb crossing. This time don't specify an address. */
749 size_t size2;
750 void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
751 PROT_NONE, flags, -1, 0);
752 switch (buf2 != MAP_FAILED) {
753 case 1:
754 if (!cross_256mb(buf2, size)) {
755 /* Success! Use the new buffer. */
756 munmap(buf, size + qemu_real_host_page_size);
757 break;
758 }
759 /* Failure. Work with what we had. */
760 munmap(buf2, size + qemu_real_host_page_size);
761 /* fallthru */
762 default:
763 /* Split the original buffer. Free the smaller half. */
764 buf2 = split_cross_256mb(buf, size);
765 size2 = tcg_ctx.code_gen_buffer_size;
766 if (buf == buf2) {
767 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
768 } else {
769 munmap(buf, size - size2);
770 }
771 size = size2;
772 break;
773 }
774 buf = buf2;
775 }
776 #endif
777
778 /* Make the final buffer accessible. The guard page at the end
779 will remain inaccessible with PROT_NONE. */
780 mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
781
782 /* Request large pages for the buffer. */
783 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
784
785 return buf;
786 }
787 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
788
789 static inline void code_gen_alloc(size_t tb_size)
790 {
791 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
792 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
793 if (tcg_ctx.code_gen_buffer == NULL) {
794 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
795 exit(1);
796 }
797
798 /* Estimate a good size for the number of TBs we can support. We
799 still haven't deducted the prologue from the buffer size here,
800 but that's minimal and won't affect the estimate much. */
801 tcg_ctx.code_gen_max_blocks
802 = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
803 tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);
804
805 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
806 }
807
808 static void tb_htable_init(void)
809 {
810 unsigned int mode = QHT_MODE_AUTO_RESIZE;
811
812 qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
813 }
814
815 /* Must be called before using the QEMU cpus. 'tb_size' is the size
816 (in bytes) allocated to the translation buffer. Zero means default
817 size. */
818 void tcg_exec_init(unsigned long tb_size)
819 {
820 cpu_gen_init();
821 page_init();
822 tb_htable_init();
823 code_gen_alloc(tb_size);
824 #if defined(CONFIG_SOFTMMU)
825 /* There's no guest base to take into account, so go ahead and
826 initialize the prologue now. */
827 tcg_prologue_init(&tcg_ctx);
828 #endif
829 }
830
831 bool tcg_enabled(void)
832 {
833 return tcg_ctx.code_gen_buffer != NULL;
834 }
835
836 /*
837 * Allocate a new translation block. Flush the translation buffer if
838 * too many translation blocks or too much generated code.
839 *
840 * Called with tb_lock held.
841 */
842 static TranslationBlock *tb_alloc(target_ulong pc)
843 {
844 TranslationBlock *tb;
845
846 assert_tb_lock();
847
848 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
849 return NULL;
850 }
851 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
852 tb->pc = pc;
853 tb->cflags = 0;
854 tb->invalid = false;
855 return tb;
856 }
857
858 /* Called with tb_lock held. */
859 void tb_free(TranslationBlock *tb)
860 {
861 assert_tb_lock();
862
863 /* In practice this is mostly used for single-use temporary TBs.
864 Ignore the hard cases and just back up if this TB happens to
865 be the last one generated.  */
866 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
867 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
868 tcg_ctx.code_gen_ptr = tb->tc_ptr;
869 tcg_ctx.tb_ctx.nb_tbs--;
870 }
871 }
872
873 static inline void invalidate_page_bitmap(PageDesc *p)
874 {
875 #ifdef CONFIG_SOFTMMU
876 g_free(p->code_bitmap);
877 p->code_bitmap = NULL;
878 p->code_write_count = 0;
879 #endif
880 }
881
882 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
883 static void page_flush_tb_1(int level, void **lp)
884 {
885 int i;
886
887 if (*lp == NULL) {
888 return;
889 }
890 if (level == 0) {
891 PageDesc *pd = *lp;
892
893 for (i = 0; i < V_L2_SIZE; ++i) {
894 pd[i].first_tb = NULL;
895 invalidate_page_bitmap(pd + i);
896 }
897 } else {
898 void **pp = *lp;
899
900 for (i = 0; i < V_L2_SIZE; ++i) {
901 page_flush_tb_1(level - 1, pp + i);
902 }
903 }
904 }
905
906 static void page_flush_tb(void)
907 {
908 int i, l1_sz = v_l1_size;
909
910 for (i = 0; i < l1_sz; i++) {
911 page_flush_tb_1(v_l2_levels, l1_map + i);
912 }
913 }
914
915 /* flush all the translation blocks */
916 static void do_tb_flush(CPUState *cpu, void *data)
917 {
918 unsigned tb_flush_req = (unsigned) (uintptr_t) data;
919
920 tb_lock();
921
922 /* If it's already been done on request of another CPU,
923 * just retry.
924 */
925 if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
926 goto done;
927 }
928
929 #if defined(DEBUG_TB_FLUSH)
930 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
931 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
932 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
933 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
934 tcg_ctx.tb_ctx.nb_tbs : 0);
935 #endif
936 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
937 > tcg_ctx.code_gen_buffer_size) {
938 cpu_abort(cpu, "Internal error: code buffer overflow\n");
939 }
940
941 CPU_FOREACH(cpu) {
942 int i;
943
944 for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
945 atomic_set(&cpu->tb_jmp_cache[i], NULL);
946 }
947 }
948
949 tcg_ctx.tb_ctx.nb_tbs = 0;
950 qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
951 page_flush_tb();
952
953 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
954 /* XXX: flush processor icache at this point if cache flush is
955 expensive */
956 atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
957 tcg_ctx.tb_ctx.tb_flush_count + 1);
958
959 done:
960 tb_unlock();
961 }
962
963 void tb_flush(CPUState *cpu)
964 {
965 if (tcg_enabled()) {
966 uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
967 async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
968 }
969 }
970
971 #ifdef DEBUG_TB_CHECK
972
973 static void
974 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
975 {
976 TranslationBlock *tb = p;
977 target_ulong addr = *(target_ulong *)userp;
978
979 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
980 printf("ERROR invalidate: address=" TARGET_FMT_lx
981 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
982 }
983 }
984
985 /* verify that all the pages have correct rights for code
986 *
987 * Called with tb_lock held.
988 */
989 static void tb_invalidate_check(target_ulong address)
990 {
991 address &= TARGET_PAGE_MASK;
992 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
993 }
994
995 static void
996 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
997 {
998 TranslationBlock *tb = p;
999 int flags1, flags2;
1000
1001 flags1 = page_get_flags(tb->pc);
1002 flags2 = page_get_flags(tb->pc + tb->size - 1);
1003 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1004 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1005 (long)tb->pc, tb->size, flags1, flags2);
1006 }
1007 }
1008
1009 /* verify that all the pages have correct rights for code */
1010 static void tb_page_check(void)
1011 {
1012 qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
1013 }
1014
1015 #endif
1016
1017 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
1018 {
1019 TranslationBlock *tb1;
1020 unsigned int n1;
1021
1022 for (;;) {
1023 tb1 = *ptb;
1024 n1 = (uintptr_t)tb1 & 3;
1025 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1026 if (tb1 == tb) {
1027 *ptb = tb1->page_next[n1];
1028 break;
1029 }
1030 ptb = &tb1->page_next[n1];
1031 }
1032 }
1033
1034 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1035 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1036 {
1037 TranslationBlock *tb1;
1038 uintptr_t *ptb, ntb;
1039 unsigned int n1;
1040
1041 ptb = &tb->jmp_list_next[n];
1042 if (*ptb) {
1043 /* find tb(n) in circular list */
1044 for (;;) {
1045 ntb = *ptb;
1046 n1 = ntb & 3;
1047 tb1 = (TranslationBlock *)(ntb & ~3);
1048 if (n1 == n && tb1 == tb) {
1049 break;
1050 }
1051 if (n1 == 2) {
1052 ptb = &tb1->jmp_list_first;
1053 } else {
1054 ptb = &tb1->jmp_list_next[n1];
1055 }
1056 }
1057 /* now we can suppress tb(n) from the list */
1058 *ptb = tb->jmp_list_next[n];
1059
1060 tb->jmp_list_next[n] = (uintptr_t)NULL;
1061 }
1062 }
1063
1064 /* reset the jump entry 'n' of a TB so that it is not chained to
1065 another TB */
1066 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1067 {
1068 uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
1069 tb_set_jmp_target(tb, n, addr);
1070 }
1071
1072 /* remove any jumps to the TB */
1073 static inline void tb_jmp_unlink(TranslationBlock *tb)
1074 {
1075 TranslationBlock *tb1;
1076 uintptr_t *ptb, ntb;
1077 unsigned int n1;
1078
1079 ptb = &tb->jmp_list_first;
1080 for (;;) {
1081 ntb = *ptb;
1082 n1 = ntb & 3;
1083 tb1 = (TranslationBlock *)(ntb & ~3);
1084 if (n1 == 2) {
1085 break;
1086 }
1087 tb_reset_jump(tb1, n1);
1088 *ptb = tb1->jmp_list_next[n1];
1089 tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1090 }
1091 }
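/* Note on the encoding used above and in tb_remove_from_jmp_list(): entries
 * in the jmp lists are TranslationBlock pointers with the jump slot number
 * stored in the two low bits:
 *     (uintptr_t)tb | 0   -> tb, linked through its jump slot 0
 *     (uintptr_t)tb | 1   -> tb, linked through its jump slot 1
 *     (uintptr_t)tb | 2   -> list terminator (tb->jmp_list_first, set up
 *                            in tb_gen_code())
 * which is why tb_gen_code() asserts that TBs are at least 4-byte aligned.
 */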
1092
1093 /* invalidate one TB
1094 *
1095 * Called with tb_lock held.
1096 */
1097 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1098 {
1099 CPUState *cpu;
1100 PageDesc *p;
1101 uint32_t h;
1102 tb_page_addr_t phys_pc;
1103
1104 assert_tb_lock();
1105
1106 atomic_set(&tb->invalid, true);
1107
1108 /* remove the TB from the hash list */
1109 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1110 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1111 qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
1112
1113 /* remove the TB from the page list */
1114 if (tb->page_addr[0] != page_addr) {
1115 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1116 tb_page_remove(&p->first_tb, tb);
1117 invalidate_page_bitmap(p);
1118 }
1119 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1120 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1121 tb_page_remove(&p->first_tb, tb);
1122 invalidate_page_bitmap(p);
1123 }
1124
1125 /* remove the TB from the per-CPU jump caches */
1126 h = tb_jmp_cache_hash_func(tb->pc);
1127 CPU_FOREACH(cpu) {
1128 if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1129 atomic_set(&cpu->tb_jmp_cache[h], NULL);
1130 }
1131 }
1132
1133 /* suppress this TB from the two jump lists */
1134 tb_remove_from_jmp_list(tb, 0);
1135 tb_remove_from_jmp_list(tb, 1);
1136
1137 /* suppress any remaining jumps to this TB */
1138 tb_jmp_unlink(tb);
1139
1140 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1141 }
1142
1143 #ifdef CONFIG_SOFTMMU
1144 static void build_page_bitmap(PageDesc *p)
1145 {
1146 int n, tb_start, tb_end;
1147 TranslationBlock *tb;
1148
1149 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1150
1151 tb = p->first_tb;
1152 while (tb != NULL) {
1153 n = (uintptr_t)tb & 3;
1154 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1155 /* NOTE: this is subtle as a TB may span two physical pages */
1156 if (n == 0) {
1157 /* NOTE: tb_end may be after the end of the page, but
1158 it is not a problem */
1159 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1160 tb_end = tb_start + tb->size;
1161 if (tb_end > TARGET_PAGE_SIZE) {
1162 tb_end = TARGET_PAGE_SIZE;
1163 }
1164 } else {
1165 tb_start = 0;
1166 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1167 }
1168 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1169 tb = tb->page_next[n];
1170 }
1171 }
1172 #endif
1173
1174 /* add the tb to the target page and protect it if necessary
1175 *
1176 * Called with mmap_lock held for user-mode emulation.
1177 */
1178 static inline void tb_alloc_page(TranslationBlock *tb,
1179 unsigned int n, tb_page_addr_t page_addr)
1180 {
1181 PageDesc *p;
1182 #ifndef CONFIG_USER_ONLY
1183 bool page_already_protected;
1184 #endif
1185
1186 assert_memory_lock();
1187
1188 tb->page_addr[n] = page_addr;
1189 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1190 tb->page_next[n] = p->first_tb;
1191 #ifndef CONFIG_USER_ONLY
1192 page_already_protected = p->first_tb != NULL;
1193 #endif
1194 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1195 invalidate_page_bitmap(p);
1196
1197 #if defined(CONFIG_USER_ONLY)
1198 if (p->flags & PAGE_WRITE) {
1199 target_ulong addr;
1200 PageDesc *p2;
1201 int prot;
1202
1203 /* force the host page as non writable (writes will have a
1204 page fault + mprotect overhead) */
1205 page_addr &= qemu_host_page_mask;
1206 prot = 0;
1207 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1208 addr += TARGET_PAGE_SIZE) {
1209
1210 p2 = page_find(addr >> TARGET_PAGE_BITS);
1211 if (!p2) {
1212 continue;
1213 }
1214 prot |= p2->flags;
1215 p2->flags &= ~PAGE_WRITE;
1216 }
1217 mprotect(g2h(page_addr), qemu_host_page_size,
1218 (prot & PAGE_BITS) & ~PAGE_WRITE);
1219 #ifdef DEBUG_TB_INVALIDATE
1220 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1221 page_addr);
1222 #endif
1223 }
1224 #else
1225 /* if some code is already present, then the pages are already
1226 protected. So we handle the case where only the first TB is
1227 allocated in a physical page */
1228 if (!page_already_protected) {
1229 tlb_protect_code(page_addr);
1230 }
1231 #endif
1232 }
1233
1234 /* add a new TB and link it to the physical page tables. phys_page2 is
1235 * (-1) to indicate that only one page contains the TB.
1236 *
1237 * Called with mmap_lock held for user-mode emulation.
1238 */
1239 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1240 tb_page_addr_t phys_page2)
1241 {
1242 uint32_t h;
1243
1244 assert_memory_lock();
1245
1246 /* add in the page list */
1247 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1248 if (phys_page2 != -1) {
1249 tb_alloc_page(tb, 1, phys_page2);
1250 } else {
1251 tb->page_addr[1] = -1;
1252 }
1253
1254 /* add in the hash table */
1255 h = tb_hash_func(phys_pc, tb->pc, tb->flags);
1256 qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1257
1258 #ifdef DEBUG_TB_CHECK
1259 tb_page_check();
1260 #endif
1261 }
1262
1263 /* Called with mmap_lock held for user mode emulation. */
1264 TranslationBlock *tb_gen_code(CPUState *cpu,
1265 target_ulong pc, target_ulong cs_base,
1266 uint32_t flags, int cflags)
1267 {
1268 CPUArchState *env = cpu->env_ptr;
1269 TranslationBlock *tb;
1270 tb_page_addr_t phys_pc, phys_page2;
1271 target_ulong virt_page2;
1272 tcg_insn_unit *gen_code_buf;
1273 int gen_code_size, search_size;
1274 #ifdef CONFIG_PROFILER
1275 int64_t ti;
1276 #endif
1277 assert_memory_lock();
1278
1279 phys_pc = get_page_addr_code(env, pc);
1280 if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1281 cflags |= CF_USE_ICOUNT;
1282 }
1283
1284 tb = tb_alloc(pc);
1285 if (unlikely(!tb)) {
1286 buffer_overflow:
1287 /* flush must be done */
1288 tb_flush(cpu);
1289 mmap_unlock();
1290 cpu_loop_exit(cpu);
1291 }
1292
1293 gen_code_buf = tcg_ctx.code_gen_ptr;
1294 tb->tc_ptr = gen_code_buf;
1295 tb->cs_base = cs_base;
1296 tb->flags = flags;
1297 tb->cflags = cflags;
1298
1299 #ifdef CONFIG_PROFILER
1300 tcg_ctx.tb_count1++; /* includes aborted translations because of
1301 exceptions */
1302 ti = profile_getclock();
1303 #endif
1304
1305 tcg_func_start(&tcg_ctx);
1306
1307 tcg_ctx.cpu = ENV_GET_CPU(env);
1308 gen_intermediate_code(env, tb);
1309 tcg_ctx.cpu = NULL;
1310
1311 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1312
1313 /* generate machine code */
1314 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1315 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1316 tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1317 #ifdef USE_DIRECT_JUMP
1318 tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
1319 tcg_ctx.tb_jmp_target_addr = NULL;
1320 #else
1321 tcg_ctx.tb_jmp_insn_offset = NULL;
1322 tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
1323 #endif
1324
1325 #ifdef CONFIG_PROFILER
1326 tcg_ctx.tb_count++;
1327 tcg_ctx.interm_time += profile_getclock() - ti;
1328 tcg_ctx.code_time -= profile_getclock();
1329 #endif
1330
1331 /* ??? Overflow could be handled better here. In particular, we
1332 don't need to re-do gen_intermediate_code, nor should we re-do
1333 the tcg optimization currently hidden inside tcg_gen_code. All
1334 that should be required is to flush the TBs, allocate a new TB,
1335 re-initialize it per above, and re-do the actual code generation. */
1336 gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1337 if (unlikely(gen_code_size < 0)) {
1338 goto buffer_overflow;
1339 }
1340 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1341 if (unlikely(search_size < 0)) {
1342 goto buffer_overflow;
1343 }
1344
1345 #ifdef CONFIG_PROFILER
1346 tcg_ctx.code_time += profile_getclock();
1347 tcg_ctx.code_in_len += tb->size;
1348 tcg_ctx.code_out_len += gen_code_size;
1349 tcg_ctx.search_out_len += search_size;
1350 #endif
1351
1352 #ifdef DEBUG_DISAS
1353 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1354 qemu_log_in_addr_range(tb->pc)) {
1355 qemu_log("OUT: [size=%d]\n", gen_code_size);
1356 log_disas(tb->tc_ptr, gen_code_size);
1357 qemu_log("\n");
1358 qemu_log_flush();
1359 }
1360 #endif
1361
1362 tcg_ctx.code_gen_ptr = (void *)
1363 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1364 CODE_GEN_ALIGN);
1365
1366 /* init jump list */
1367 assert(((uintptr_t)tb & 3) == 0);
1368 tb->jmp_list_first = (uintptr_t)tb | 2;
1369 tb->jmp_list_next[0] = (uintptr_t)NULL;
1370 tb->jmp_list_next[1] = (uintptr_t)NULL;
1371
1372 /* init original jump addresses which have been set during tcg_gen_code() */
1373 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1374 tb_reset_jump(tb, 0);
1375 }
1376 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1377 tb_reset_jump(tb, 1);
1378 }
1379
1380 /* check next page if needed */
1381 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1382 phys_page2 = -1;
1383 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1384 phys_page2 = get_page_addr_code(env, virt_page2);
1385 }
1386 /* As long as consistency of the TB stuff is provided by tb_lock in user
1387 * mode and is implicit in single-threaded softmmu emulation, no explicit
1388 * memory barrier is required before tb_link_page() makes the TB visible
1389 * through the physical hash table and physical page list.
1390 */
1391 tb_link_page(tb, phys_pc, phys_page2);
1392 return tb;
1393 }
1394
1395 /*
1396  * Invalidate all TBs which intersect with the target physical address range
1397  * [start;end[.  NOTE: start and end may refer to *different* physical pages.
1398  * Each page in the range is handed to tb_invalidate_phys_page_range(),
1399  * which does the actual invalidation (this path is not a direct cpu write
1400  * access, so no current-TB exit is requested here).
1401  *
1402  * Called with mmap_lock held for user-mode emulation
1403  */
1404 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1405 {
1406 assert_memory_lock();
1407
1408 while (start < end) {
1409 tb_invalidate_phys_page_range(start, end, 0);
1410 start &= TARGET_PAGE_MASK;
1411 start += TARGET_PAGE_SIZE;
1412 }
1413 }
1414
1415 /*
1416 * Invalidate all TBs which intersect with the target physical address range
1417 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1418 * 'is_cpu_write_access' should be true if called from a real cpu write
1419 * access: the virtual CPU will exit the current TB if code is modified inside
1420 * this TB.
1421 *
1422 * Called with mmap_lock held for user-mode emulation
1423 */
1424 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1425 int is_cpu_write_access)
1426 {
1427 TranslationBlock *tb, *tb_next;
1428 #if defined(TARGET_HAS_PRECISE_SMC)
1429 CPUState *cpu = current_cpu;
1430 CPUArchState *env = NULL;
1431 #endif
1432 tb_page_addr_t tb_start, tb_end;
1433 PageDesc *p;
1434 int n;
1435 #ifdef TARGET_HAS_PRECISE_SMC
1436 int current_tb_not_found = is_cpu_write_access;
1437 TranslationBlock *current_tb = NULL;
1438 int current_tb_modified = 0;
1439 target_ulong current_pc = 0;
1440 target_ulong current_cs_base = 0;
1441 uint32_t current_flags = 0;
1442 #endif /* TARGET_HAS_PRECISE_SMC */
1443
1444 assert_memory_lock();
1445
1446 p = page_find(start >> TARGET_PAGE_BITS);
1447 if (!p) {
1448 return;
1449 }
1450 #if defined(TARGET_HAS_PRECISE_SMC)
1451 if (cpu != NULL) {
1452 env = cpu->env_ptr;
1453 }
1454 #endif
1455
1456 /* we remove all the TBs in the range [start, end[ */
1457 /* XXX: see if in some cases it could be faster to invalidate all
1458 the code */
1459 tb = p->first_tb;
1460 while (tb != NULL) {
1461 n = (uintptr_t)tb & 3;
1462 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1463 tb_next = tb->page_next[n];
1464 /* NOTE: this is subtle as a TB may span two physical pages */
1465 if (n == 0) {
1466 /* NOTE: tb_end may be after the end of the page, but
1467 it is not a problem */
1468 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1469 tb_end = tb_start + tb->size;
1470 } else {
1471 tb_start = tb->page_addr[1];
1472 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1473 }
1474 if (!(tb_end <= start || tb_start >= end)) {
1475 #ifdef TARGET_HAS_PRECISE_SMC
1476 if (current_tb_not_found) {
1477 current_tb_not_found = 0;
1478 current_tb = NULL;
1479 if (cpu->mem_io_pc) {
1480 /* now we have a real cpu fault */
1481 current_tb = tb_find_pc(cpu->mem_io_pc);
1482 }
1483 }
1484 if (current_tb == tb &&
1485 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1486 /* If we are modifying the current TB, we must stop
1487 its execution. We could be more precise by checking
1488 that the modification is after the current PC, but it
1489 would require a specialized function to partially
1490 restore the CPU state */
1491
1492 current_tb_modified = 1;
1493 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1494 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1495 &current_flags);
1496 }
1497 #endif /* TARGET_HAS_PRECISE_SMC */
1498 tb_phys_invalidate(tb, -1);
1499 }
1500 tb = tb_next;
1501 }
1502 #if !defined(CONFIG_USER_ONLY)
1503 /* if no code remaining, no need to continue to use slow writes */
1504 if (!p->first_tb) {
1505 invalidate_page_bitmap(p);
1506 tlb_unprotect_code(start);
1507 }
1508 #endif
1509 #ifdef TARGET_HAS_PRECISE_SMC
1510 if (current_tb_modified) {
1511 /* we generate a block containing just the instruction
1512 modifying the memory. It will ensure that it cannot modify
1513 itself */
1514 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1515 cpu_loop_exit_noexc(cpu);
1516 }
1517 #endif
1518 }
1519
1520 #ifdef CONFIG_SOFTMMU
1521 /* len must be <= 8 and start must be a multiple of len */
1522 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1523 {
1524 PageDesc *p;
1525
1526 #if 0
1527 if (1) {
1528 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1529 cpu_single_env->mem_io_vaddr, len,
1530 cpu_single_env->eip,
1531 cpu_single_env->eip +
1532 (intptr_t)cpu_single_env->segs[R_CS].base);
1533 }
1534 #endif
1535 p = page_find(start >> TARGET_PAGE_BITS);
1536 if (!p) {
1537 return;
1538 }
1539 if (!p->code_bitmap &&
1540 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1541 /* build code bitmap. FIXME: writes should be protected by
1542 * tb_lock, reads by tb_lock or RCU.
1543 */
1544 build_page_bitmap(p);
1545 }
1546 if (p->code_bitmap) {
1547 unsigned int nr;
1548 unsigned long b;
1549
1550 nr = start & ~TARGET_PAGE_MASK;
1551 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1552 if (b & ((1 << len) - 1)) {
1553 goto do_invalidate;
1554 }
1555 } else {
1556 do_invalidate:
1557 tb_invalidate_phys_page_range(start, start + len, 1);
1558 }
1559 }
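/* Worked example (illustrative): for a 4-byte write at page offset 0x104,
 * nr == 0x104 and the code above tests bits 0x104..0x107 of p->code_bitmap
 * (the word at BIT_WORD(0x104), shifted right by 0x104 % BITS_PER_LONG).
 * If any of them is set, translated code overlaps the write and the slow
 * tb_invalidate_phys_page_range() path runs; otherwise the write is known
 * not to touch translated code and nothing needs to be invalidated.
 */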
1560 #else
1561 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1562 * host PC of the faulting store instruction that caused this invalidate.
1563 * Returns true if the caller needs to abort execution of the current
1564 * TB (because it was modified by this store and the guest CPU has
1565 * precise-SMC semantics).
1566 */
1567 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1568 {
1569 TranslationBlock *tb;
1570 PageDesc *p;
1571 int n;
1572 #ifdef TARGET_HAS_PRECISE_SMC
1573 TranslationBlock *current_tb = NULL;
1574 CPUState *cpu = current_cpu;
1575 CPUArchState *env = NULL;
1576 int current_tb_modified = 0;
1577 target_ulong current_pc = 0;
1578 target_ulong current_cs_base = 0;
1579 uint32_t current_flags = 0;
1580 #endif
1581
1582 addr &= TARGET_PAGE_MASK;
1583 p = page_find(addr >> TARGET_PAGE_BITS);
1584 if (!p) {
1585 return false;
1586 }
1587 tb = p->first_tb;
1588 #ifdef TARGET_HAS_PRECISE_SMC
1589 if (tb && pc != 0) {
1590 current_tb = tb_find_pc(pc);
1591 }
1592 if (cpu != NULL) {
1593 env = cpu->env_ptr;
1594 }
1595 #endif
1596 while (tb != NULL) {
1597 n = (uintptr_t)tb & 3;
1598 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1599 #ifdef TARGET_HAS_PRECISE_SMC
1600 if (current_tb == tb &&
1601 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1602 /* If we are modifying the current TB, we must stop
1603 its execution. We could be more precise by checking
1604 that the modification is after the current PC, but it
1605 would require a specialized function to partially
1606 restore the CPU state */
1607
1608 current_tb_modified = 1;
1609 cpu_restore_state_from_tb(cpu, current_tb, pc);
1610 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1611 &current_flags);
1612 }
1613 #endif /* TARGET_HAS_PRECISE_SMC */
1614 tb_phys_invalidate(tb, addr);
1615 tb = tb->page_next[n];
1616 }
1617 p->first_tb = NULL;
1618 #ifdef TARGET_HAS_PRECISE_SMC
1619 if (current_tb_modified) {
1620 /* we generate a block containing just the instruction
1621 modifying the memory. It will ensure that it cannot modify
1622 itself */
1623 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1624 return true;
1625 }
1626 #endif
1627 return false;
1628 }
1629 #endif
1630
1631 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1632 tb[1].tc_ptr. Return NULL if not found */
1633 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1634 {
1635 int m_min, m_max, m;
1636 uintptr_t v;
1637 TranslationBlock *tb;
1638
1639 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1640 return NULL;
1641 }
1642 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1643 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1644 return NULL;
1645 }
1646 /* binary search (cf Knuth) */
1647 m_min = 0;
1648 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1649 while (m_min <= m_max) {
1650 m = (m_min + m_max) >> 1;
1651 tb = &tcg_ctx.tb_ctx.tbs[m];
1652 v = (uintptr_t)tb->tc_ptr;
1653 if (v == tc_ptr) {
1654 return tb;
1655 } else if (tc_ptr < v) {
1656 m_max = m - 1;
1657 } else {
1658 m_min = m + 1;
1659 }
1660 }
1661 return &tcg_ctx.tb_ctx.tbs[m_max];
1662 }
1663
1664 #if !defined(CONFIG_USER_ONLY)
1665 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1666 {
1667 ram_addr_t ram_addr;
1668 MemoryRegion *mr;
1669 hwaddr l = 1;
1670
1671 rcu_read_lock();
1672 mr = address_space_translate(as, addr, &addr, &l, false);
1673 if (!(memory_region_is_ram(mr)
1674 || memory_region_is_romd(mr))) {
1675 rcu_read_unlock();
1676 return;
1677 }
1678 ram_addr = memory_region_get_ram_addr(mr) + addr;
1679 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1680 rcu_read_unlock();
1681 }
1682 #endif /* !defined(CONFIG_USER_ONLY) */
1683
1684 /* Called with tb_lock held. */
1685 void tb_check_watchpoint(CPUState *cpu)
1686 {
1687 TranslationBlock *tb;
1688
1689 tb = tb_find_pc(cpu->mem_io_pc);
1690 if (tb) {
1691 /* We can use retranslation to find the PC. */
1692 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1693 tb_phys_invalidate(tb, -1);
1694 } else {
1695 /* The exception probably happened in a helper. The CPU state should
1696 have been saved before calling it. Fetch the PC from there. */
1697 CPUArchState *env = cpu->env_ptr;
1698 target_ulong pc, cs_base;
1699 tb_page_addr_t addr;
1700 uint32_t flags;
1701
1702 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1703 addr = get_page_addr_code(env, pc);
1704 tb_invalidate_phys_range(addr, addr + 1);
1705 }
1706 }
1707
1708 #ifndef CONFIG_USER_ONLY
1709 /* in deterministic execution mode, instructions doing device I/Os
1710 must be at the end of the TB */
1711 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1712 {
1713 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1714 CPUArchState *env = cpu->env_ptr;
1715 #endif
1716 TranslationBlock *tb;
1717 uint32_t n, cflags;
1718 target_ulong pc, cs_base;
1719 uint32_t flags;
1720
1721 tb = tb_find_pc(retaddr);
1722 if (!tb) {
1723 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1724 (void *)retaddr);
1725 }
1726 n = cpu->icount_decr.u16.low + tb->icount;
1727 cpu_restore_state_from_tb(cpu, tb, retaddr);
1728 /* Calculate how many instructions had been executed before the fault
1729 occurred. */
1730 n = n - cpu->icount_decr.u16.low;
1731 /* Generate a new TB ending on the I/O insn. */
1732 n++;
1733 /* On MIPS and SH, delay slot instructions can only be restarted if
1734 they were already the first instruction in the TB. If this is not
1735 the first instruction in a TB then re-execute the preceding
1736 branch. */
1737 #if defined(TARGET_MIPS)
1738 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1739 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1740 cpu->icount_decr.u16.low++;
1741 env->hflags &= ~MIPS_HFLAG_BMASK;
1742 }
1743 #elif defined(TARGET_SH4)
1744 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1745 && n > 1) {
1746 env->pc -= 2;
1747 cpu->icount_decr.u16.low++;
1748 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1749 }
1750 #endif
1751 /* This should never happen. */
1752 if (n > CF_COUNT_MASK) {
1753 cpu_abort(cpu, "TB too big during recompile");
1754 }
1755
1756 cflags = n | CF_LAST_IO;
1757 pc = tb->pc;
1758 cs_base = tb->cs_base;
1759 flags = tb->flags;
1760 tb_phys_invalidate(tb, -1);
1761 if (tb->cflags & CF_NOCACHE) {
1762 if (tb->orig_tb) {
1763 /* Invalidate original TB if this TB was generated in
1764 * cpu_exec_nocache() */
1765 tb_phys_invalidate(tb->orig_tb, -1);
1766 }
1767 tb_free(tb);
1768 }
1769 /* FIXME: In theory this could raise an exception. In practice
1770 we have already translated the block once so it's probably ok. */
1771 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1772 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1773 the first in the TB) then we end up generating a whole new TB and
1774 repeating the fault, which is horribly inefficient.
1775 Better would be to execute just this insn uncached, or generate a
1776 second new TB. */
1777 cpu_loop_exit_noexc(cpu);
1778 }
1779
1780 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1781 {
1782 unsigned int i;
1783
1784 /* Discard jump cache entries for any tb which might potentially
1785 overlap the flushed page. */
1786 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1787 memset(&cpu->tb_jmp_cache[i], 0,
1788 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1789
1790 i = tb_jmp_cache_hash_page(addr);
1791 memset(&cpu->tb_jmp_cache[i], 0,
1792 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1793 }
1794
1795 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1796 struct qht_stats hst)
1797 {
1798 uint32_t hgram_opts;
1799 size_t hgram_bins;
1800 char *hgram;
1801
1802 if (!hst.head_buckets) {
1803 return;
1804 }
1805 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1806 hst.used_head_buckets, hst.head_buckets,
1807 (double)hst.used_head_buckets / hst.head_buckets * 100);
1808
1809 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1810 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1811 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1812 hgram_opts |= QDIST_PR_NODECIMAL;
1813 }
1814 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1815 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1816 qdist_avg(&hst.occupancy) * 100, hgram);
1817 g_free(hgram);
1818
1819 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1820 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1821 if (hgram_bins > 10) {
1822 hgram_bins = 10;
1823 } else {
1824 hgram_bins = 0;
1825 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1826 }
1827 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1828 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1829 qdist_avg(&hst.chain), hgram);
1830 g_free(hgram);
1831 }
1832
1833 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1834 {
1835 int i, target_code_size, max_target_code_size;
1836 int direct_jmp_count, direct_jmp2_count, cross_page;
1837 TranslationBlock *tb;
1838 struct qht_stats hst;
1839
1840 target_code_size = 0;
1841 max_target_code_size = 0;
1842 cross_page = 0;
1843 direct_jmp_count = 0;
1844 direct_jmp2_count = 0;
1845 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1846 tb = &tcg_ctx.tb_ctx.tbs[i];
1847 target_code_size += tb->size;
1848 if (tb->size > max_target_code_size) {
1849 max_target_code_size = tb->size;
1850 }
1851 if (tb->page_addr[1] != -1) {
1852 cross_page++;
1853 }
1854 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1855 direct_jmp_count++;
1856 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1857 direct_jmp2_count++;
1858 }
1859 }
1860 }
1861 /* XXX: avoid using doubles ? */
1862 cpu_fprintf(f, "Translation buffer state:\n");
1863 cpu_fprintf(f, "gen code size %td/%zd\n",
1864 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1865 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1866 cpu_fprintf(f, "TB count %d/%d\n",
1867 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1868 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1869 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1870 tcg_ctx.tb_ctx.nb_tbs : 0,
1871 max_target_code_size);
1872 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1873 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1874 tcg_ctx.code_gen_buffer) /
1875 tcg_ctx.tb_ctx.nb_tbs : 0,
1876 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1877 tcg_ctx.code_gen_buffer) /
1878 target_code_size : 0);
1879 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1880 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1881 tcg_ctx.tb_ctx.nb_tbs : 0);
1882 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1883 direct_jmp_count,
1884 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1885 tcg_ctx.tb_ctx.nb_tbs : 0,
1886 direct_jmp2_count,
1887 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1888 tcg_ctx.tb_ctx.nb_tbs : 0);
1889
1890 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
1891 print_qht_statistics(f, cpu_fprintf, hst);
1892 qht_statistics_destroy(&hst);
1893
1894 cpu_fprintf(f, "\nStatistics:\n");
1895 cpu_fprintf(f, "TB flush count %u\n",
1896 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
1897 cpu_fprintf(f, "TB invalidate count %d\n",
1898 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1899 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1900 tcg_dump_info(f, cpu_fprintf);
1901 }
1902
1903 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1904 {
1905 tcg_dump_op_count(f, cpu_fprintf);
1906 }
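/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * dump_exec_info() only needs a FILE pointer plus a formatter whose
 * signature matches fprintf_function, so a debugging hook could send the
 * report to stderr with plain fprintf().  The helper name below is
 * hypothetical.
 */
#if 0 /* editor's example only, not built */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
    dump_opcount_info(stderr, fprintf);
}
#endif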
1907
1908 #else /* CONFIG_USER_ONLY */
1909
1910 void cpu_interrupt(CPUState *cpu, int mask)
1911 {
1912 cpu->interrupt_request |= mask;
1913 cpu->tcg_exit_req = 1;
1914 }
1915
1916 /*
1917 * Walks guest process memory "regions" one by one
1918 * and calls the callback function 'fn' for each region.
1919 */
1920 struct walk_memory_regions_data {
1921 walk_memory_regions_fn fn;
1922 void *priv;
1923 target_ulong start;
1924 int prot;
1925 };
1926
1927 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1928 target_ulong end, int new_prot)
1929 {
1930 if (data->start != -1u) {
1931 int rc = data->fn(data->priv, data->start, end, data->prot);
1932 if (rc != 0) {
1933 return rc;
1934 }
1935 }
1936
1937 data->start = (new_prot ? end : -1u);
1938 data->prot = new_prot;
1939
1940 return 0;
1941 }
1942
1943 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1944 target_ulong base, int level, void **lp)
1945 {
1946 target_ulong pa;
1947 int i, rc;
1948
1949 if (*lp == NULL) {
1950 return walk_memory_regions_end(data, base, 0);
1951 }
1952
1953 if (level == 0) {
1954 PageDesc *pd = *lp;
1955
1956 for (i = 0; i < V_L2_SIZE; ++i) {
1957 int prot = pd[i].flags;
1958
1959 pa = base | (i << TARGET_PAGE_BITS);
1960 if (prot != data->prot) {
1961 rc = walk_memory_regions_end(data, pa, prot);
1962 if (rc != 0) {
1963 return rc;
1964 }
1965 }
1966 }
1967 } else {
1968 void **pp = *lp;
1969
1970 for (i = 0; i < V_L2_SIZE; ++i) {
1971 pa = base | ((target_ulong)i <<
1972 (TARGET_PAGE_BITS + V_L2_BITS * level));
1973 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1974 if (rc != 0) {
1975 return rc;
1976 }
1977 }
1978 }
1979
1980 return 0;
1981 }
1982
1983 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1984 {
1985 struct walk_memory_regions_data data;
1986 uintptr_t i, l1_sz = v_l1_size;
1987
1988 data.fn = fn;
1989 data.priv = priv;
1990 data.start = -1u;
1991 data.prot = 0;
1992
1993 for (i = 0; i < l1_sz; i++) {
1994 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
1995 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
1996 if (rc != 0) {
1997 return rc;
1998 }
1999 }
2000
2001 return walk_memory_regions_end(&data, 0, 0);
2002 }
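/*
 * Editor's illustrative sketch (not from the original source):
 * walk_memory_regions() reports each maximal run of pages that share the
 * same protection bits.  A hypothetical caller could total the executable
 * bytes of the guest address space like this; both helper names are made
 * up, while dump_region() below is the real in-tree user.
 */
#if 0 /* editor's example only, not built */
static int count_exec_region(void *priv, target_ulong start,
                             target_ulong end, unsigned long prot)
{
    target_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;   /* returning non-zero would abort the walk */
}

static target_ulong example_count_exec_bytes(void)
{
    target_ulong total = 0;

    walk_memory_regions(&total, count_exec_region);
    return total;
}
#endif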
2003
2004 static int dump_region(void *priv, target_ulong start,
2005 target_ulong end, unsigned long prot)
2006 {
2007 FILE *f = (FILE *)priv;
2008
2009 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2010 " "TARGET_FMT_lx" %c%c%c\n",
2011 start, end, end - start,
2012 ((prot & PAGE_READ) ? 'r' : '-'),
2013 ((prot & PAGE_WRITE) ? 'w' : '-'),
2014 ((prot & PAGE_EXEC) ? 'x' : '-'));
2015
2016 return 0;
2017 }
2018
2019 /* dump memory mappings */
2020 void page_dump(FILE *f)
2021 {
2022 const int length = sizeof(target_ulong) * 2;
2023 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2024 length, "start", length, "end", length, "size", "prot");
2025 walk_memory_regions(f, dump_region);
2026 }
2027
2028 int page_get_flags(target_ulong address)
2029 {
2030 PageDesc *p;
2031
2032 p = page_find(address >> TARGET_PAGE_BITS);
2033 if (!p) {
2034 return 0;
2035 }
2036 return p->flags;
2037 }
2038
2039 /* Modify the flags of a page and invalidate the code if necessary.
2040 The flag PAGE_WRITE_ORG is set automatically depending
2041 on PAGE_WRITE. The mmap_lock should already be held. */
2042 void page_set_flags(target_ulong start, target_ulong end, int flags)
2043 {
2044 target_ulong addr, len;
2045
2046 /* This function should never be called with addresses outside the
2047 guest address space. If this assert fires, it probably indicates
2048 a missing call to h2g_valid. */
2049 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2050 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2051 #endif
2052 assert(start < end);
2053 assert_memory_lock();
2054
2055 start = start & TARGET_PAGE_MASK;
2056 end = TARGET_PAGE_ALIGN(end);
2057
2058 if (flags & PAGE_WRITE) {
2059 flags |= PAGE_WRITE_ORG;
2060 }
2061
2062 for (addr = start, len = end - start;
2063 len != 0;
2064 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2065 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2066
2067 /* If the page was write-protected and is now being made
2068 writable, invalidate any translated code it contains. */
2069 if (!(p->flags & PAGE_WRITE) &&
2070 (flags & PAGE_WRITE) &&
2071 p->first_tb) {
2072 tb_invalidate_phys_page(addr, 0);
2073 }
2074 p->flags = flags;
2075 }
2076 }
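/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical caller marking a freshly mapped guest range readable and
 * writable.  PAGE_WRITE_ORG is derived inside page_set_flags(), so only
 * the visible protection bits plus PAGE_VALID are passed, and the
 * mmap_lock is taken as the comment above requires.  example_map_rw() is
 * a made-up name.
 */
#if 0 /* editor's example only, not built */
static void example_map_rw(target_ulong start, target_ulong len)
{
    mmap_lock();
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    mmap_unlock();
}
#endif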
2077
2078 int page_check_range(target_ulong start, target_ulong len, int flags)
2079 {
2080 PageDesc *p;
2081 target_ulong end;
2082 target_ulong addr;
2083
2084 /* This function should never be called with addresses outside the
2085 guest address space. If this assert fires, it probably indicates
2086 a missing call to h2g_valid. */
2087 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2088 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2089 #endif
2090
2091 if (len == 0) {
2092 return 0;
2093 }
2094 if (start + len - 1 < start) {
2095 /* We've wrapped around. */
2096 return -1;
2097 }
2098
2099 /* must do this before we lose bits in the next step */
2100 end = TARGET_PAGE_ALIGN(start + len);
2101 start = start & TARGET_PAGE_MASK;
2102
2103 for (addr = start, len = end - start;
2104 len != 0;
2105 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2106 p = page_find(addr >> TARGET_PAGE_BITS);
2107 if (!p) {
2108 return -1;
2109 }
2110 if (!(p->flags & PAGE_VALID)) {
2111 return -1;
2112 }
2113
2114 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2115 return -1;
2116 }
2117 if (flags & PAGE_WRITE) {
2118 if (!(p->flags & PAGE_WRITE_ORG)) {
2119 return -1;
2120 }
2121 /* unprotect the page if it was put read-only because it
2122 contains translated code */
2123 if (!(p->flags & PAGE_WRITE)) {
2124 if (!page_unprotect(addr, 0)) {
2125 return -1;
2126 }
2127 }
2128 }
2129 }
2130 return 0;
2131 }
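/*
 * Editor's illustrative sketch (not from the original source):
 * page_check_range() returns 0 when every page covering the range grants
 * the requested access and -1 otherwise, transparently restoring write
 * access to pages that were only made read-only to guard translated code.
 * A hypothetical helper validating a guest buffer before an emulated
 * syscall writes to it could look like this; the helper name is made up.
 */
#if 0 /* editor's example only, not built */
static bool example_guest_buffer_writable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}
#endif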
2132
2133 /* called from signal handler: invalidate the code and unprotect the
2134 * page. Return 0 if the fault was not handled, 1 if it was handled,
2135 * and 2 if it was handled but the caller must cause the TB to be
2136 * immediately exited. (We can only return 2 if the 'pc' argument is
2137 * non-zero.)
2138 */
2139 int page_unprotect(target_ulong address, uintptr_t pc)
2140 {
2141 unsigned int prot;
2142 bool current_tb_invalidated;
2143 PageDesc *p;
2144 target_ulong host_start, host_end, addr;
2145
2146 /* Technically this isn't safe inside a signal handler. However, we
2147 know this only ever happens in a synchronous SEGV handler, so in
2148 practice it seems to be ok. */
2149 mmap_lock();
2150
2151 p = page_find(address >> TARGET_PAGE_BITS);
2152 if (!p) {
2153 mmap_unlock();
2154 return 0;
2155 }
2156
2157 /* if the page was really writable, then we change its
2158 protection back to writable */
2159 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2160 host_start = address & qemu_host_page_mask;
2161 host_end = host_start + qemu_host_page_size;
2162
2163 prot = 0;
2164 current_tb_invalidated = false;
2165 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2166 p = page_find(addr >> TARGET_PAGE_BITS);
2167 p->flags |= PAGE_WRITE;
2168 prot |= p->flags;
2169
2170 /* and since the content will be modified, we must invalidate
2171 the corresponding translated code. */
2172 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2173 #ifdef DEBUG_TB_CHECK
2174 tb_invalidate_check(addr);
2175 #endif
2176 }
2177 mprotect((void *)g2h(host_start), qemu_host_page_size,
2178 prot & PAGE_BITS);
2179
2180 mmap_unlock();
2181 /* If current TB was invalidated return to main loop */
2182 return current_tb_invalidated ? 2 : 1;
2183 }
2184 mmap_unlock();
2185 return 0;
2186 }
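/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a fault handler might act on page_unprotect()'s return value.  The
 * real consumer lives in the per-host signal handling code; the function
 * name and exit_current_tb() are hypothetical and only spell out the
 * 0/1/2 contract documented above.
 */
#if 0 /* editor's example only, not built */
static bool example_handle_write_fault(target_ulong guest_addr, uintptr_t pc)
{
    switch (page_unprotect(guest_addr, pc)) {
    case 0:
        return false;  /* not a protected page; deliver the fault to the guest */
    case 1:
        return true;   /* unprotected; restart the faulting instruction */
    case 2:
        /* Unprotected, but the current TB was invalidated: force an exit
           from the TB before resuming (hypothetical helper). */
        exit_current_tb();
        return true;
    default:
        g_assert_not_reached();
    }
}
#endif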
2187 #endif /* CONFIG_USER_ONLY */