/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
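
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * encoding the delta 300 with encode_sleb128() above emits two bytes.
 *
 *   300 = 0b10_0101100
 *   pass 1: byte = 0x2c, val becomes 2; val != 0, so emit 0x2c | 0x80 = 0xac
 *   pass 2: byte = 0x02, val becomes 0 and bit 0x40 is clear, so emit 0x02
 *
 * decode_sleb128() reverses this: 0xac contributes 0x2c at shift 0 and
 * 0x02 contributes 2 << 7 = 256, giving 0x2c + 256 = 300 back.
 */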

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

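/*
 * Illustrative sketch (editor's note, not part of the original file),
 * assuming a target with TARGET_INSN_START_WORDS == 1: for a TB at guest
 * pc 0x1000 holding two insns whose insn_start words are { 0x1000 } and
 * { 0x1004 }, with host code ending at offsets 0x20 and 0x38 from
 * tb->tc_ptr, the encoded rows are
 *
 *   row 0: sleb128(0x1000 - 0x1000), sleb128(0x20 - 0)    -> 0x00, 0x20
 *   row 1: sleb128(0x1004 - 0x1000), sleb128(0x38 - 0x20) -> 0x04, 0x18
 *
 * i.e. one small delta per column, which is why the table stays compact.
 */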
static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
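
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * with 4 KiB host pages, qemu_real_host_page_size == 0x1000 and the
 * negation above yields qemu_real_host_page_mask == -0x1000, i.e.
 * 0x...fffff000, so "addr & mask" rounds an address down to the start
 * of its page.
 */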

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
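
/*
 * Illustrative sketch (editor's note, not part of the original file),
 * assuming a 64-bit host with L1_MAP_ADDR_SPACE_BITS == 64,
 * TARGET_PAGE_BITS == 12 and V_L2_BITS == 10: a page index then carries
 * 52 significant bits, and since 52 % 10 == 2 < 4, V_L1_BITS == 12.
 * The walk above consumes the index top-down as
 *
 *   [ 12 | 10 | 10 | 10 | 10 ]
 *
 * i.e. a 4096-entry root table followed by four 1024-entry levels, the
 * last of which is the PageDesc array indexed by the low 10 bits.
 */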

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmapping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
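
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * with addr = 0x0ff00000 and size = 0x00200000, addr + size is
 * 0x10100000; XORing the endpoints flips bit 28, which survives the
 * ~0x0fffffff mask, so cross_256mb() reports that the range straddles
 * a 256MB boundary.
 */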

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
    if (!tcg_enabled()) {
        return;
    }
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
        cpu->tb_flushed = true;
    }

    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
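
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * the page lists pack the page index n (0 or 1, telling which of the
 * TB's two pages the link belongs to) into the low two bits of the
 * TranslationBlock pointer, which are free because TBs are at least
 * 4-byte aligned:
 *
 *   p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);    // pack
 *   n1  = (uintptr_t)tb1 & 3;                                 // unpack tag
 *   tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);          // unpack ptr
 */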

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}
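
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * the incoming-jump list of a TB reuses the same low-bit tagging, with
 * tag value 2 reserved for the list anchor.  With source TBs S0 and S1
 * both chained to destination D, the circular list looks like
 *
 *   D->jmp_list_first     = (uintptr_t)S1 | n1
 *   S1->jmp_list_next[n1] = (uintptr_t)S0 | n0
 *   S0->jmp_list_next[n0] = (uintptr_t)D  | 2     <- closes the circle
 *
 * so the n1 == 2 tests above recognise the anchor entry and stop.
 */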

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif
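
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * the bitmap built above has one bit per byte of the guest page.  For a
 * page holding a single TB that starts at offset 0x100 and is 0x40
 * bytes long, bits 0x100..0x13f are set; a later write to offset 0x200
 * then misses the bitmap and can skip the expensive invalidation path.
 */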

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
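
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * lookups must hash with exactly the same triple used at insertion
 * time above, along the lines of
 *
 *   h = tb_hash_func(phys_pc, pc, flags);
 *   tb = qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp_fn, &desc, h);
 *
 * where tb_cmp_fn and desc stand in for the comparison callback and
 * lookup key the caller supplies (hypothetical names here; the real
 * lookup lives in cpu-exec.c).
 */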

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end may refer to *different* physical pages.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start, end).  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
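
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * for a 4-byte store at page offset 0x104 on a 64-bit host, nr = 0x104,
 * so p->code_bitmap[BIT_WORD(0x104)] == p->code_bitmap[4] is shifted
 * right by 0x104 & 63 == 4 bits and the low len == 4 bits of the result
 * are tested; any set bit means translated code overlaps the store.
 */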
#else
/* Called with mmap_lock held.  If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        return true;
    }
#endif
    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
                                 struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts  = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;
    struct qht_stats hst;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                                   target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);

    qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
    print_qht_statistics(f, cpu_fprintf, hst);
    qht_statistics_destroy(&hst);

    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        current_tb_invalidated = false;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */