/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "exec/log.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
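
/* Worked example (illustrative, hypothetical configuration): for a
 * user-mode target with TARGET_VIRT_ADDR_SPACE_BITS = 47,
 * TARGET_PAGE_BITS = 12 and V_L2_BITS = 10, the 35 bits above the page
 * offset split as 35 % 10 = 5, so V_L1_BITS = 5, V_L1_SIZE = 32 and
 * V_L1_SHIFT = 30: one 32-entry top-level table followed by
 * V_L1_SHIFT / V_L2_BITS = 3 levels of 1024-entry tables.
 */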

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];

/* code generation context */
TCGContext tcg_ctx;

/* translation block context */
#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
#endif

void tb_lock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(!have_tb_lock);
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
#endif
}

void tb_unlock(void)
{
#ifdef CONFIG_USER_ONLY
    assert(have_tb_lock);
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
#endif
}

void tb_lock_reset(void)
{
#ifdef CONFIG_USER_ONLY
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
#endif
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
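
/* Usage sketch (illustrative only, not part of the build): a value
 * encoded with encode_sleb128() decodes back to itself, e.g.:
 *
 *     uint8_t buf[16];
 *     uint8_t *end = encode_sleb128(buf, -12345);
 *     uint8_t *p = buf;
 *     assert(decode_sleb128(&p) == -12345 && p == end);
 */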

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */
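
/* For example (hypothetical numbers, assuming TARGET_INSN_START_WORDS == 1
 * and 4-byte guest insns): a TB at guest pc 0x1000 whose two insns end at
 * host offsets 0x20 and 0x35 would be encoded as the sleb128 delta rows
 * { 0x1000 - 0x1000, 0x20 - 0 } and { 0x1004 - 0x1000, 0x35 - 0x20 },
 * i.e. { 0, 0x20 } and { 4, 0x15 }.
 */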

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.  */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            cpu->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        return true;
    }
    return false;
}

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
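
/* Illustrative example: on a host with 4 KiB pages emulating a guest with
 * 8 KiB pages, qemu_real_host_page_size ends up as 4096 while
 * qemu_host_page_size is rounded up to TARGET_PAGE_SIZE, i.e. 8192, and
 * qemu_host_page_mask becomes -8192.
 */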

static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
#elif defined(__s390x__)
/* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
/* We have a 256MB branch region, but leave room to make sure the
   main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
    (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
     ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
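
/* Example (illustrative addresses): a 2 MiB buffer at 0x0ff00000 ends at
 * 0x10100000; the XOR of base and end has bit 28 set, so cross_256mb()
 * reports a crossing, while the same buffer at 0x10000000 does not:
 *
 *     assert(cross_256mb((void *)0x0ff00000ul, 0x200000));
 *     assert(!cross_256mb((void *)0x10000000ul, 0x200000));
 */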

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch (buf2 != MAP_FAILED) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
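
/* In all three variants above, the buffer is followed by one inaccessible
 * host page.  The point (inferred from the highwater test in
 * encode_search()) is that translation may run slightly past the highwater
 * mark before the overflow check fires; a true overrun then faults cleanly
 * on the guard page instead of silently corrupting adjacent memory.
 */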

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

/* Must be called before using the QEMU cpus.  'tb_size' is the size
   (in bytes) allocated to the translation buffer.  Zero means default
   size.  */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/* Allocate a new translation block.  Flush the translation buffer if
   too many translation blocks or too much generated code.  */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
        tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;

    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
             tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;

    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = tb1->phys_hash_next;
            break;
        }
        ptb = &tb1->phys_hash_next;
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
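
/* Note on the encoding used by the jump lists (a summary of the existing
 * logic, not new behaviour): list entries are TranslationBlock pointers
 * whose low two bits are a tag.  Tags 0 and 1 name which jump slot of the
 * pointed-to TB is meant; tag 2 marks the list head kept in
 * jmp_list_first.  For example:
 *
 *     uintptr_t e = tb->jmp_list_first;
 *     int slot = e & 3;                                   // 0, 1 or 2
 *     TranslationBlock *t = (TranslationBlock *)(e & ~3); // untagged
 */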

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* invalidate one TB */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    uintptr_t tb1, tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_list_first;
    for (;;) {
        TranslationBlock *tmp_tb;
        n1 = tb1 & 3;
        if (n1 == 2) {
            break;
        }
        tmp_tb = (TranslationBlock *)(tb1 & ~3);
        tb2 = tmp_tb->jmp_list_next[n1];
        tb_reset_jump(tmp_tb, n1);
        tmp_tb->jmp_list_next[n1] = (uintptr_t)NULL;
        tb1 = tb2;
    }

    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2; /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
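
/* Illustrative example (4 KiB target pages): a 48-byte TB whose guest pc
 * sits 16 bytes before the end of page A is visited twice by the loop
 * above: on page A (n == 0) it sets bits [4080, 4096) of A's bitmap, and
 * on page B (n == 1) it sets bits [0, 32) of B's bitmap.
 */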

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
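
/* Example (illustrative): a TB whose guest code straddles a page boundary
 * is entered into both pages' lists via the two tb_alloc_page() calls
 * above; for the common single-page case tb_gen_code() passes
 * phys_page2 == -1 and only page_addr[0] is linked.
 */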

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        assert(tb != NULL);
        /* Don't forget to invalidate previous TB info.  */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /* As long as consistency of the TB stuff is provided by tb_lock in user
     * mode and is implicit in single-threaded softmmu emulation, no explicit
     * memory barrier is required before tb_link_page() makes the TB visible
     * through the physical hash table and physical page list.
     */
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, 0);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
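
/* Worked example (illustrative): a 4-byte store at page offset 0x100
 * computes nr = 0x100 and tests bits 256..259 of code_bitmap; only if one
 * of those bits is set, i.e. translated code overlaps the written bytes,
 * does it take the slow tb_invalidate_phys_page_range() path.
 */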

#if !defined(CONFIG_SOFTMMU)
/* Called with mmap_lock held.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
        return NULL;
    }
    if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
        tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tcg_ctx.tb_ctx.tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr) {
            return tb;
        } else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tcg_ctx.tb_ctx.tbs[m_max];
}
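
/* The binary search above relies on an invariant worth spelling out:
 * tb_alloc() hands out TBs in the order their code is generated in
 * code_gen_buffer, so tbs[] is implicitly sorted by tc_ptr.  A minimal
 * usage sketch (hypothetical host address):
 *
 *     TranslationBlock *tb = tb_find_pc(some_host_retaddr);
 *     // if non-NULL: tb->tc_ptr <= some_host_retaddr < next TB's tc_ptr
 */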

#if !defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr, &l, false);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        rcu_read_unlock();
        return;
    }
    ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
    rcu_read_unlock();
}
#endif /* !defined(CONFIG_USER_ONLY) */

void tb_check_watchpoint(CPUState *cpu)
{
    TranslationBlock *tb;

    tb = tb_find_pc(cpu->mem_io_pc);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        tb_invalidate_phys_range(addr, addr + 1);
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint32_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    if (tb->cflags & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tb_free(tb);
    }
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(cpu, NULL);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
            direct_jmp_count++;
            if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
    cpu_fprintf(f, "TB count %d/%d\n",
                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                             tcg_ctx.code_gen_buffer) /
                target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
                tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
                tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
{
    tcg_dump_op_count(f, cpu_fprintf);
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                         (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data,
                                       (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
                                       V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }
    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */