/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
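
/* Worked example of the arithmetic above (the real values depend on the
 * target; these are illustrative): with L1_MAP_ADDR_SPACE_BITS == 32 and
 * TARGET_PAGE_BITS == 12, 20 index bits remain.  20 % 10 == 0, so
 * V_L1_BITS_REM == 0 < 4 and V_L1_BITS == 10, giving a 1024-entry L1
 * table (V_L1_SHIFT == 10) whose slots point directly at 1024-entry
 * leaf tables of PageDescs.
 */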

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
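
/* A minimal round-trip sketch for the two helpers above (illustrative
 * only, kept out of the build):
 */
#if 0
    uint8_t buf[16], *p = buf, *q = buf;

    p = encode_sleb128(p, -2);           /* emits the single byte 0x7e */
    assert(p - buf == 1 && buf[0] == 0x7e);
    assert(decode_sleb128(&q) == -2);    /* sign bit 6 is extended back out */
    assert(q == p);
#endif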

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
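
/* Illustrative encoding (invented numbers): a TB at guest pc 0x1000 with
 * two insns ending at host code offsets 0x20 and 0x34, and
 * TARGET_INSN_START_WORDS == 1, has the logical rows {0x1000, 0x20} and
 * {0x1004, 0x34}.  Row 0 is stored as deltas from the seed {0x1000, 0},
 * i.e. {0, 0x20}; row 1 as deltas from row 0, i.e. {4, 0x14}.
 */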

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
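
/* Example of the mask arithmetic above: with a 4096-byte page,
 * -(intptr_t)4096 == ~(intptr_t)0xfff, so (addr & qemu_host_page_mask)
 * rounds addr down to a host page boundary.
 */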

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}
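
/* Index-splitting sketch for the walk above, reusing the illustrative
 * V_L1_SHIFT == 10, V_L2_BITS == 10 configuration: a page index of
 * 0x12345 selects L1 slot (0x12345 >> 10) & 0x3ff == 0x48, and then
 * PageDesc 0x12345 & 0x3ff == 0x345 within that leaf table; with only
 * one level below L1 the intermediate-level loop body never runs.
 */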

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
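
/* Worked example for the two helpers above (illustrative addresses): a
 * buffer at 0x0fff0000 of size 0x30000 ends at 0x10020000, so base ^ end
 * has bits set above bit 27 and cross_256mb() reports a crossing.
 * split_cross_256mb() splits at the 256MB boundary 0x10000000 into pieces
 * of 0x10000 and 0x20000 bytes and keeps the larger one, returning
 * 0x10000000 with code_gen_buffer_size set to 0x20000.
 */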

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}
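
/* Sketch of how a translated block is then looked up in the qht (modelled
 * on the lookup path in cpu-exec.c; the comparison callback shown here is
 * a simplified stand-in for the real one, which also checks cs_base and
 * page_addr[]):
 */
#if 0
static bool tb_cmp_sketch(const void *p, const void *userp)
{
    const TranslationBlock *tb = p;
    const TranslationBlock *desc = userp;

    return tb->pc == desc->pc && tb->flags == desc->flags;
}

    TranslationBlock desc, *tb;
    uint32_t h;

    desc.pc = pc;
    desc.flags = flags;
    h = tb_hash_func(phys_pc, pc, flags);
    tb = qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp_sketch, &desc, h);
#endif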

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   there are too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
        cpu->tb_flushed = true;
    }

    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
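
/* The page lists above keep a 2-bit tag in the low bits of each pointer:
 * TBs are at least 4-byte aligned, so ((uintptr_t)tb | n) records which of
 * the TB's up-to-two pages (n == 0 or 1) the link belongs to, while
 * (uintptr_t)tb1 & 3 and (uintptr_t)tb1 & ~3 recover the tag and the
 * pointer.  The jump lists below reuse the same trick, with tag value 2
 * marking the head of the circular list.
 */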

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
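
/* E.g. (illustrative numbers) a TB whose code starts at tc_ptr 0x7f00
 * with jmp_reset_offset[0] == 0x40 gets its first goto_tb patched to
 * 0x7f40, the instruction just past the branch, so the TB falls back to
 * returning to the main loop instead of chaining to another TB.
 */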

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end may refer to *different* physical pages.
 * The virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[.  NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
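
/* Example of the bitmap check above (illustrative): for a 2-byte write at
 * page offset nr == 0x104 on a 64-bit host, BIT_WORD(nr) == 4 selects the
 * bitmap word covering offsets 0x100..0x13f, the shift by (nr & 63) == 4
 * moves the two relevant bits down, and (b & 3) is nonzero iff either
 * written byte overlaps translated code.
 */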
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        return true;
    }
#endif
    return false;
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count            %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                        target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            if (tb_invalidate_phys_page(addr, pc)) {
                mmap_unlock();
                return 2;
            }
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */