/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/*
 * Accesses to the various translation structures need to be serialised
 * via locks for consistency.
 * In user-mode emulation, accesses to the memory-related structures are
 * protected by mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock()
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    uintptr_t first_tb;
#ifdef CONFIG_SOFTMMU
    /* To optimize self-modifying code handling, we count the code-write
       lookups on a given page; past a threshold we build a bitmap of its
       translated code. */
    unsigned long *code_bitmap;
    unsigned int code_write_count;
#else
    unsigned long flags;
#endif
#ifndef CONFIG_USER_ONLY
    QemuSpin lock;
#endif
} PageDesc;

/**
 * struct page_entry - page descriptor entry
 * @pd:     pointer to the &struct PageDesc of the page this entry represents
 * @index:  page index of the page
 * @locked: whether the page is locked
 *
 * This struct helps us keep track of the locked state of a page, without
 * bloating &struct PageDesc.
 *
 * A page lock protects accesses to all fields of &struct PageDesc.
 *
 * See also: &struct page_collection.
 */
struct page_entry {
    PageDesc *pd;
    tb_page_addr_t index;
    bool locked;
};

/**
 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
 * @tree:   Binary search tree (BST) of the pages, with key == page index
 * @max:    Pointer to the page in @tree with the highest page index
 *
 * To avoid deadlock we lock pages in ascending order of page index.
 * When operating on a set of pages, we need to keep track of them so that
 * we can lock them in order and also unlock them later. For this we collect
 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
 * @tree implementation we use does not provide an O(1) operation to obtain the
 * highest-ranked element, we use @max to keep track of the inserted page
 * with the highest index. This is valuable because if a page is not in
 * the tree and its index is higher than @max's, then we can lock it
 * without breaking the locking order rule.
 *
 * Note on naming: 'struct page_set' would be shorter, but we already have a few
 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
 *
 * See also: page_collection_lock().
 */
struct page_collection {
    GTree *tree;
    struct page_entry *max;
};

/* list iterators for lists of tagged pointers in TranslationBlock */
#define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
    for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
         tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
             tb = (TranslationBlock *)((uintptr_t)tb & ~1))

#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
    TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)

#define TB_FOR_EACH_JMP(head_tb, tb, n) \
    TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)

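/*
 * Illustrative note (an editorial addition, not from the original source):
 * the iterators above walk lists whose links carry a tag in the pointer's
 * least-significant bit.  Since a TranslationBlock is at least word-aligned,
 * bit 0 of a TB pointer is always 0 and can record which of the two
 * per-page (or per-jump) slots the link came from, e.g.:
 *
 *     uintptr_t tagged = (uintptr_t)tb | n;              // n is 0 or 1
 *     TranslationBlock *p = (TranslationBlock *)(tagged & ~1);
 *     int slot = tagged & 1;
 */
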
/*
 * In system mode we want L1_MAP to be based on ram offsets,
 * while in user mode we want it to be based on virtual addresses.
 *
 * TODO: For user mode, see the caveat re host vs guest virtual
 * address spaces near GUEST_ADDR_MAX.
 */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
                  * BITS_PER_BYTE);

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
bool parallel_cpus;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}

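/*
 * Worked example (an editorial illustration, not from the original source):
 * with L1_MAP_ADDR_SPACE_BITS = 64 and TARGET_PAGE_BITS = 12, the page
 * index is 52 bits wide.  52 % 10 = 2, which is below V_L1_MIN_BITS, so
 * v_l1_bits = 12; then v_l1_size = 4096 entries, v_l1_shift = 40, and
 * v_l2_levels = 40 / 10 - 1 = 3 intermediate levels of V_L2_SIZE (1024)
 * pointers each below the L1 table.
 */
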
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_init_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}

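/*
 * A minimal round-trip sketch of the two helpers above (an editorial
 * illustration, kept out of the build; the buffer size and test value
 * are arbitrary).  -300 encodes to the two bytes 0xd4 0x7d, and decoding
 * walks the same bytes back to -300.
 */
#if 0
static void sleb128_selftest(void)
{
    uint8_t buf[16];
    uint8_t *end = encode_sleb128(buf, -300);   /* writes 0xd4 0x7d */
    uint8_t *cur = buf;
    target_long val = decode_sleb128(&cur);     /* reads both bytes back */

    assert(end - buf == 2);
    assert(cur == end);
    assert(val == -300);
}
#endif
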
/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

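/*
 * Encoding example (an editorial illustration, not from the original
 * source): for a TB at pc 0x1000 containing two guest insns, with
 * TARGET_INSN_START_WORDS == 1 and host code end offsets 0x20 and 0x48,
 * the logical table
 *
 *     { 0x1000, 0x20 }
 *     { 0x1004, 0x48 }
 *
 * is emitted as the sleb128 deltas { 0, 0x20 } for the first row (which
 * is seeded with { tb->pc, 0 }) and { 4, 0x28 } for the second.
 */
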
/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, the current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    atomic_set(&prof->restore_time,
               prof->restore_time + profile_getclock() - ti);
    atomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    TranslationBlock *tb;
    bool r = false;
    uintptr_t check_offset;

    /* The host_pc has to be in the region of the current code buffer.
     * If it is not we will not be able to resolve it here.  The two
     * cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
     *
     * We are using unsigned arithmetic so if host_pc <
     * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
     * above the code_gen_buffer_size.
     */
    check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;

    if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
        tb = tcg_tb_lookup(host_pc);
        if (tb) {
            cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
            if (tb_cflags(tb) & CF_NOCACHE) {
                /* one-shot translation, invalidate it immediately */
                tb_phys_invalidate(tb, -1);
                tcg_tb_remove(tb);
            }
            r = true;
        }
    }

    return r;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            void *existing;

            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            existing = atomic_cmpxchg(lp, NULL, p);
            if (unlikely(existing)) {
                g_free(p);
                p = existing;
            }
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        void *existing;

        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
#ifndef CONFIG_USER_ONLY
        {
            int i;

            for (i = 0; i < V_L2_SIZE; i++) {
                qemu_spin_init(&pd[i].lock);
            }
        }
#endif
        existing = atomic_cmpxchg(lp, NULL, pd);
        if (unlikely(existing)) {
            g_free(pd);
            pd = existing;
        }
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
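
/*
 * Worked example (an editorial illustration, not from the original
 * source): with v_l1_shift = 40, v_l2_levels = 3 and V_L2_BITS = 10, a
 * page index is consumed as bits [51:40] in l1_map, then bits [39:30],
 * [29:20] and [19:10] in three intermediate tables, with the final 10
 * bits selecting a PageDesc inside the leaf block of V_L2_SIZE
 * descriptors.
 */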

static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);

/* In user-mode page locks aren't used; mmap_lock is enough */
#ifdef CONFIG_USER_ONLY

#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

static inline void page_lock(PageDesc *pd)
{ }

static inline void page_unlock(PageDesc *pd)
{ }

static inline void page_lock_tb(const TranslationBlock *tb)
{ }

static inline void page_unlock_tb(const TranslationBlock *tb)
{ }

struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    return NULL;
}

void page_collection_unlock(struct page_collection *set)
{ }
#else /* !CONFIG_USER_ONLY */

#ifdef CONFIG_DEBUG_TCG

static __thread GHashTable *ht_pages_locked_debug;

static void ht_pages_locked_debug_init(void)
{
    if (ht_pages_locked_debug) {
        return;
    }
    ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
}

static bool page_is_locked(const PageDesc *pd)
{
    PageDesc *found;

    ht_pages_locked_debug_init();
    found = g_hash_table_lookup(ht_pages_locked_debug, pd);
    return !!found;
}

static void page_lock__debug(PageDesc *pd)
{
    ht_pages_locked_debug_init();
    g_assert(!page_is_locked(pd));
    g_hash_table_insert(ht_pages_locked_debug, pd, pd);
}

static void page_unlock__debug(const PageDesc *pd)
{
    bool removed;

    ht_pages_locked_debug_init();
    g_assert(page_is_locked(pd));
    removed = g_hash_table_remove(ht_pages_locked_debug, pd);
    g_assert(removed);
}

static void
do_assert_page_locked(const PageDesc *pd, const char *file, int line)
{
    if (unlikely(!page_is_locked(pd))) {
        error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
                     pd, file, line);
        abort();
    }
}

#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)

void assert_no_pages_locked(void)
{
    ht_pages_locked_debug_init();
    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
}

#else /* !CONFIG_DEBUG_TCG */

#define assert_page_locked(pd)

static inline void page_lock__debug(const PageDesc *pd)
{
}

static inline void page_unlock__debug(const PageDesc *pd)
{
}

#endif /* CONFIG_DEBUG_TCG */

static inline void page_lock(PageDesc *pd)
{
    page_lock__debug(pd);
    qemu_spin_lock(&pd->lock);
}

static inline void page_unlock(PageDesc *pd)
{
    qemu_spin_unlock(&pd->lock);
    page_unlock__debug(pd);
}

/* lock the page(s) of a TB in the correct acquisition order */
static inline void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
}

static inline void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb->page_addr[1] != -1)) {
        PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}

static inline struct page_entry *
page_entry_new(PageDesc *pd, tb_page_addr_t index)
{
    struct page_entry *pe = g_malloc(sizeof(*pe));

    pe->index = index;
    pe->pd = pd;
    pe->locked = false;
    return pe;
}

static void page_entry_destroy(gpointer p)
{
    struct page_entry *pe = p;

    g_assert(pe->locked);
    page_unlock(pe->pd);
    g_free(pe);
}

/* returns false on success */
static bool page_entry_trylock(struct page_entry *pe)
{
    bool busy;

    busy = qemu_spin_trylock(&pe->pd->lock);
    if (!busy) {
        g_assert(!pe->locked);
        pe->locked = true;
        page_lock__debug(pe->pd);
    }
    return busy;
}

static void do_page_entry_lock(struct page_entry *pe)
{
    page_lock(pe->pd);
    g_assert(!pe->locked);
    pe->locked = true;
}

static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    do_page_entry_lock(pe);
    return FALSE;
}

static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
{
    struct page_entry *pe = value;

    if (pe->locked) {
        pe->locked = false;
        page_unlock(pe->pd);
    }
    return FALSE;
}

/*
 * Trylock a page, and if successful, add the page to a collection.
 * Returns true ("busy") if the page could not be locked; false otherwise.
 */
static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    struct page_entry *pe;
    PageDesc *pd;

    pe = g_tree_lookup(set->tree, &index);
    if (pe) {
        return false;
    }

    pd = page_find(index);
    if (pd == NULL) {
        return false;
    }

    pe = page_entry_new(pd, index);
    g_tree_insert(set->tree, &pe->index, pe);

    /*
     * If this is either (1) the first insertion or (2) a page whose index
     * is higher than any other so far, just lock the page and move on.
     */
    if (set->max == NULL || pe->index > set->max->index) {
        set->max = pe;
        do_page_entry_lock(pe);
        return false;
    }
    /*
     * Try to acquire out-of-order lock; if busy, return busy so that we acquire
     * locks in order.
     */
    return page_entry_trylock(pe);
}

static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    tb_page_addr_t a = *(const tb_page_addr_t *)ap;
    tb_page_addr_t b = *(const tb_page_addr_t *)bp;

    if (a == b) {
        return 0;
    } else if (a < b) {
        return -1;
    }
    return 1;
}

/*
 * Lock a range of pages ([@start,@end[) as well as the pages of all
 * intersecting TBs.
 * Locking order: acquire locks in ascending order of page index.
 */
struct page_collection *
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *set = g_malloc(sizeof(*set));
    tb_page_addr_t index;
    PageDesc *pd;

    start >>= TARGET_PAGE_BITS;
    end >>= TARGET_PAGE_BITS;
    g_assert(start <= end);

    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                page_entry_destroy);
    set->max = NULL;
    assert_no_pages_locked();

 retry:
    g_tree_foreach(set->tree, page_entry_lock, NULL);

    for (index = start; index <= end; index++) {
        TranslationBlock *tb;
        int n;

        pd = page_find(index);
        if (pd == NULL) {
            continue;
        }
        if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
            g_tree_foreach(set->tree, page_entry_unlock, NULL);
            goto retry;
        }
        assert_page_locked(pd);
        PAGE_FOR_EACH_TB(pd, tb, n) {
            if (page_trylock_add(set, tb->page_addr[0]) ||
                (tb->page_addr[1] != -1 &&
                 page_trylock_add(set, tb->page_addr[1]))) {
                /* drop all locks, and reacquire in order */
                g_tree_foreach(set->tree, page_entry_unlock, NULL);
                goto retry;
            }
        }
    }
    return set;
}

void page_collection_unlock(struct page_collection *set)
{
    /* entries are unlocked and freed via page_entry_destroy */
    g_tree_destroy(set->tree);
    g_free(set);
}

#endif /* !CONFIG_USER_ONLY */

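/*
 * Typical usage pattern for the collection API above, as a sketch (an
 * editorial illustration kept out of the build; the function name and
 * range are hypothetical).  All pages overlapping the range, plus the
 * pages of every intersecting TB, are locked in ascending index order
 * on entry and released on unlock:
 */
#if 0
static void example_invalidate_window(tb_page_addr_t start)
{
    struct page_collection *pages;

    pages = page_collection_lock(start, start + TARGET_PAGE_SIZE);
    /* ... operate on the locked pages, e.g. invalidate TBs ... */
    page_collection_unlock(pages);
}
#endif
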
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

/* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
   but not so small that we can't have a fair number of TBs live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
#elif defined(__s390x__)
/* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
#elif defined(__mips__)
/* We have a 256MB branch region, but leave room to make sure the
   main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32-bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

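/*
 * Worked example (an editorial illustration, not from the original
 * source): a 64-bit user-mode build picks DEFAULT_CODE_GEN_BUFFER_SIZE_1
 * = 128 MiB, which is below the 2 GiB MAX_CODE_GEN_BUFFER_SIZE of e.g.
 * x86-64 hosts, so DEFAULT_CODE_GEN_BUFFER_SIZE resolves to 128 MiB.
 */
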
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx->code_gen_buffer_size = size1;
    return buf1;
}
#endif

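/*
 * Worked example for the two helpers above (an editorial illustration,
 * not from the original source): a 32 MiB buffer at 0x0ff00000 ends at
 * 0x11f00000, so the start and end differ above the low 28 bits and
 * cross_256mb() is true.  split_cross_256mb() splits at 0x10000000 into
 * a 1 MiB lower piece and a 31 MiB upper piece, keeps the larger upper
 * piece, and sets code_gen_buffer_size to 31 MiB.
 */
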
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    size_t size;

    /* page-align the beginning and end of the buffer */
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx->code_gen_buffer_size) {
        size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
                               qemu_real_host_page_size);
    }
    tcg_ctx->code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx->code_gen_buffer_size;
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        abort();
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx->code_gen_buffer_size;
    return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_EXECUTE_READWRITE);
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    size_t size = tcg_ctx->code_gen_buffer_size;
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx->code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx->code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
}

static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return a->pc == b->pc &&
        a->cs_base == b->cs_base &&
        a->flags == b->flags &&
        (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
        a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
        a->page_addr[0] == b->page_addr[0] &&
        a->page_addr[1] == b->page_addr[1];
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    tcg_allowed = true;
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(tcg_ctx);
#endif
}

/* call with @p->lock held */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    assert_page_locked(p);
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void do_tb_page_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
}

#endif /* CONFIG_USER_ONLY */

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing a different destination here
         * would be a bug, because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
                     tb->trace_vcpu_dstate);
    if (!(tb->cflags & CF_NOCACHE) &&
        !qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        invalidate_page_bitmap(p);
        if (tb->page_addr[1] != -1) {
            p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
        }
    }

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    atomic_set(&tcg_ctx->tb_phys_invalidate_count,
               tcg_ctx->tb_phys_invalidate_count + 1);
}

static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    do_tb_phys_invalidate(tb, true);
}

/* invalidate one TB
 *
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb->page_addr[0] != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

#ifdef CONFIG_SOFTMMU
/* call with @p->lock held */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    assert_page_locked(p);
    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    PAGE_FOR_EACH_TB(p, tb, n) {
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
    }
}
#endif

/* add the TB to the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_addr[n] = page_addr;
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
        if (DEBUG_TB_INVALIDATE_GATE) {
            printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
        }
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
static TranslationBlock *
tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
             tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;

    assert_memory_lock();

    if (phys_pc == -1) {
        /*
         * If the TB is not associated with a physical RAM page then
         * it must be a temporary one-insn TB, and we have nothing to do
         * except fill in the page_addr[] fields.
         */
        assert(tb->cflags & CF_NOCACHE);
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

    /*
     * Add the TB to the page list, first acquiring the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
    tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    if (!(tb->cflags & CF_NOCACHE)) {
        void *existing_tb = NULL;
        uint32_t h;

        /* add in the hash table */
        h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
                         tb->trace_vcpu_dstate);
        qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

        /* remove TB from the page(s) if we couldn't insert it */
        if (unlikely(existing_tb)) {
            tb_page_remove(p, tb);
            invalidate_page_bitmap(p);
            if (p2) {
                tb_page_remove(p2, tb);
                invalidate_page_bitmap(p2);
            }
            tb = existing_tb;
        }
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);

#ifdef CONFIG_USER_ONLY
    if (DEBUG_TB_CHECK_GATE) {
        tb_page_check();
    }
#endif
    return tb;
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif

    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a temporary TB with 1 insn in it */
        cflags &= ~CF_COUNT_MASK;
        cflags |= CF_NOCACHE | 1;
    }

    cflags &= ~CF_CLUSTER_MASK;
    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (cpu->singlestep_enabled || singlestep) {
        max_insns = 1;
    }

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->orig_tb = NULL;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            max_insns = tb->icount;
            assert(max_insns > 1);
            max_insns /= 2;
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        int code_size, data_size = 0;
        g_autoptr(GString) note = g_string_new("[tb header & initial instruction]");
        size_t chunk_start = 0;
        int insn = 0;
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            data_size = gen_code_size - code_size;
        } else {
            code_size = gen_code_size;
        }

        /* Dump header and the first instruction */
        chunk_start = tcg_ctx->gen_insn_end_off[insn];
        log_disas(tb->tc.ptr, chunk_start, note->str);

        /*
         * Dump each instruction chunk, wrapping up empty chunks into
         * the next instruction. The whole array is offset so the
         * first entry is the beginning of the 2nd instruction.
         */
        while (insn <= tb->icount && chunk_start < code_size) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                g_string_printf(note, "[guest addr: " TARGET_FMT_lx "]",
                                tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start,
                          note->str);
                chunk_start = chunk_end;
            }
            insn++;
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;
            qemu_log("  data: [size=%d]\n", data_size);
            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}
1894
1895 /*
1896 * @p must be non-NULL.
1897 * user-mode: call with mmap_lock held.
1898 * !user-mode: call with all @pages locked.
1899 */
1900 static void
1901 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1902 PageDesc *p, tb_page_addr_t start,
1903 tb_page_addr_t end,
1904 uintptr_t retaddr)
1905 {
1906 TranslationBlock *tb;
1907 tb_page_addr_t tb_start, tb_end;
1908 int n;
1909 #ifdef TARGET_HAS_PRECISE_SMC
1910 CPUState *cpu = current_cpu;
1911 CPUArchState *env = NULL;
1912 bool current_tb_not_found = retaddr != 0;
1913 bool current_tb_modified = false;
1914 TranslationBlock *current_tb = NULL;
1915 target_ulong current_pc = 0;
1916 target_ulong current_cs_base = 0;
1917 uint32_t current_flags = 0;
1918 #endif /* TARGET_HAS_PRECISE_SMC */
1919
1920 assert_page_locked(p);
1921
1922 #if defined(TARGET_HAS_PRECISE_SMC)
1923 if (cpu != NULL) {
1924 env = cpu->env_ptr;
1925 }
1926 #endif
1927
1928 /* we remove all the TBs in the range [start, end[ */
1929 /* XXX: see if in some cases it could be faster to invalidate all
1930 the code */
1931 PAGE_FOR_EACH_TB(p, tb, n) {
1932 assert_page_locked(p);
1933 /* NOTE: this is subtle as a TB may span two physical pages */
1934 if (n == 0) {
1935 /* NOTE: tb_end may be after the end of the page, but
1936 it is not a problem */
1937 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1938 tb_end = tb_start + tb->size;
1939 } else {
1940 tb_start = tb->page_addr[1];
1941 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1942 }
1943 if (!(tb_end <= start || tb_start >= end)) {
1944 #ifdef TARGET_HAS_PRECISE_SMC
1945 if (current_tb_not_found) {
1946 current_tb_not_found = false;
1947 /* now we have a real cpu fault */
1948 current_tb = tcg_tb_lookup(retaddr);
1949 }
1950 if (current_tb == tb &&
1951 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1952 /*
1953 * If we are modifying the current TB, we must stop
1954 * its execution. We could be more precise by checking
1955 * that the modification is after the current PC, but it
1956 * would require a specialized function to partially
1957 * restore the CPU state.
1958 */
1959 current_tb_modified = true;
1960 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1961 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1962 &current_flags);
1963 }
1964 #endif /* TARGET_HAS_PRECISE_SMC */
1965 tb_phys_invalidate__locked(tb);
1966 }
1967 }
1968 #if !defined(CONFIG_USER_ONLY)
1969 /* if no code remains, there is no need to keep using slow writes */
1970 if (!p->first_tb) {
1971 invalidate_page_bitmap(p);
1972 tlb_unprotect_code(start);
1973 }
1974 #endif
1975 #ifdef TARGET_HAS_PRECISE_SMC
1976 if (current_tb_modified) {
1977 page_collection_unlock(pages);
1978 /* Force execution of one insn next time. */
1979 cpu->cflags_next_tb = 1 | curr_cflags();
1980 mmap_unlock();
1981 cpu_loop_exit_noexc(cpu);
1982 }
1983 #endif
1984 }
1985
1986 /*
1987 * Invalidate all TBs which intersect with the target physical address range
1988 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1989 * A real cpu write access is signalled by passing a non-zero retaddr to
1990 * the __locked variant: the virtual CPU will then exit the current TB
1991 * if code is modified inside this TB.
1992 *
1993 * Called with mmap_lock held for user-mode emulation
1994 */
1995 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1996 {
1997 struct page_collection *pages;
1998 PageDesc *p;
1999
2000 assert_memory_lock();
2001
2002 p = page_find(start >> TARGET_PAGE_BITS);
2003 if (p == NULL) {
2004 return;
2005 }
2006 pages = page_collection_lock(start, end);
2007 tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
2008 page_collection_unlock(pages);
2009 }
2010
2011 /*
2012 * Invalidate all TBs which intersect with the target physical address range
2013 * [start;end[. NOTE: start and end may refer to *different* physical pages.
2014 * A real cpu write access is signalled by passing a non-zero retaddr to
2015 * the __locked variant: the virtual CPU will then exit the current TB
2016 * if code is modified inside this TB.
2017 *
2018 * Called with mmap_lock held for user-mode emulation.
2019 */
2020 #ifdef CONFIG_SOFTMMU
2021 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
2022 #else
2023 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
2024 #endif
2025 {
2026 struct page_collection *pages;
2027 tb_page_addr_t next;
2028
2029 assert_memory_lock();
2030
2031 pages = page_collection_lock(start, end);
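/* walk the range one guest page at a time, clamping each sub-range to
   its page so the per-page __locked helper only ever sees one page */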
2032 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2033 start < end;
2034 start = next, next += TARGET_PAGE_SIZE) {
2035 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
2036 tb_page_addr_t bound = MIN(next, end);
2037
2038 if (pd == NULL) {
2039 continue;
2040 }
2041 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
2042 }
2043 page_collection_unlock(pages);
2044 }
2045
2046 #ifdef CONFIG_SOFTMMU
2047 /* len must be <= 8 and start must be a multiple of len.
2048 * Called via softmmu_template.h when code areas are written to,
2049 * with the iothread mutex not held.
2050 *
2051 * Call with all @pages in the range [@start, @start + len[ locked.
2052 */
2053 void tb_invalidate_phys_page_fast(struct page_collection *pages,
2054 tb_page_addr_t start, int len,
2055 uintptr_t retaddr)
2056 {
2057 PageDesc *p;
2058
2059 assert_memory_lock();
2060
2061 p = page_find(start >> TARGET_PAGE_BITS);
2062 if (!p) {
2063 return;
2064 }
2065
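/*
 * Fast path for frequently-written code pages: after
 * SMC_BITMAP_USE_THRESHOLD writes, build a bitmap with one bit per byte
 * of translated code so that writes hitting no code can return early.
 */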
2066 assert_page_locked(p);
2067 if (!p->code_bitmap &&
2068 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2069 build_page_bitmap(p);
2070 }
2071 if (p->code_bitmap) {
2072 unsigned int nr;
2073 unsigned long b;
2074
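/* test whether any of the len bytes being written overlap code */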
2075 nr = start & ~TARGET_PAGE_MASK;
2076 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2077 if (b & ((1 << len) - 1)) {
2078 goto do_invalidate;
2079 }
2080 } else {
2081 do_invalidate:
2082 tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
2083 retaddr);
2084 }
2085 }
2086 #else
2087 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2088 * host PC of the faulting store instruction that caused this invalidation.
2089 * Returns true if the caller needs to abort execution of the current
2090 * TB (because it was modified by this store and the guest CPU has
2091 * precise-SMC semantics).
2092 */
2093 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2094 {
2095 TranslationBlock *tb;
2096 PageDesc *p;
2097 int n;
2098 #ifdef TARGET_HAS_PRECISE_SMC
2099 TranslationBlock *current_tb = NULL;
2100 CPUState *cpu = current_cpu;
2101 CPUArchState *env = NULL;
2102 int current_tb_modified = 0;
2103 target_ulong current_pc = 0;
2104 target_ulong current_cs_base = 0;
2105 uint32_t current_flags = 0;
2106 #endif
2107
2108 assert_memory_lock();
2109
2110 addr &= TARGET_PAGE_MASK;
2111 p = page_find(addr >> TARGET_PAGE_BITS);
2112 if (!p) {
2113 return false;
2114 }
2115
2116 #ifdef TARGET_HAS_PRECISE_SMC
2117 if (p->first_tb && pc != 0) {
2118 current_tb = tcg_tb_lookup(pc);
2119 }
2120 if (cpu != NULL) {
2121 env = cpu->env_ptr;
2122 }
2123 #endif
2124 assert_page_locked(p);
2125 PAGE_FOR_EACH_TB(p, tb, n) {
2126 #ifdef TARGET_HAS_PRECISE_SMC
2127 if (current_tb == tb &&
2128 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2129 /* If we are modifying the current TB, we must stop
2130 its execution. We could be more precise by checking
2131 that the modification is after the current PC, but it
2132 would require a specialized function to partially
2133 restore the CPU state */
2134
2135 current_tb_modified = 1;
2136 cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2137 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2138 &current_flags);
2139 }
2140 #endif /* TARGET_HAS_PRECISE_SMC */
2141 tb_phys_invalidate(tb, addr);
2142 }
2143 p->first_tb = (uintptr_t)NULL;
2144 #ifdef TARGET_HAS_PRECISE_SMC
2145 if (current_tb_modified) {
2146 /* Force execution of one insn next time. */
2147 cpu->cflags_next_tb = 1 | curr_cflags();
2148 return true;
2149 }
2150 #endif
2151
2152 return false;
2153 }
2154 #endif
2155
2156 /* user-mode: call with mmap_lock held */
2157 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
2158 {
2159 TranslationBlock *tb;
2160
2161 assert_memory_lock();
2162
2163 tb = tcg_tb_lookup(retaddr);
2164 if (tb) {
2165 /* We can use retranslation to find the PC. */
2166 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2167 tb_phys_invalidate(tb, -1);
2168 } else {
2169 /* The exception probably happened in a helper. The CPU state should
2170 have been saved before calling it. Fetch the PC from there. */
2171 CPUArchState *env = cpu->env_ptr;
2172 target_ulong pc, cs_base;
2173 tb_page_addr_t addr;
2174 uint32_t flags;
2175
2176 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2177 addr = get_page_addr_code(env, pc);
2178 if (addr != -1) {
2179 tb_invalidate_phys_range(addr, addr + 1);
2180 }
2181 }
2182 }
2183
2184 #ifndef CONFIG_USER_ONLY
2185 /* in deterministic execution mode, instructions doing device I/O
2186 * must be at the end of the TB.
2187 *
2188 * Called by softmmu_template.h, with iothread mutex not held.
2189 */
2190 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2191 {
2192 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2193 CPUArchState *env = cpu->env_ptr;
2194 #endif
2195 TranslationBlock *tb;
2196 uint32_t n;
2197
2198 tb = tcg_tb_lookup(retaddr);
2199 if (!tb) {
2200 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2201 (void *)retaddr);
2202 }
2203 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2204
2205 /* On MIPS and SH, delay slot instructions can only be restarted if
2206 they were already the first instruction in the TB. If this is not
2207 the first instruction in a TB then re-execute the preceding
2208 branch. */
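/* n is the number of insns the replacement TB must execute: 1 for the
   I/O insn alone, 2 when the preceding branch must be replayed too */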
2209 n = 1;
2210 #if defined(TARGET_MIPS)
2211 if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2212 && env->active_tc.PC != tb->pc) {
2213 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2214 cpu_neg(cpu)->icount_decr.u16.low++;
2215 env->hflags &= ~MIPS_HFLAG_BMASK;
2216 n = 2;
2217 }
2218 #elif defined(TARGET_SH4)
2219 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2220 && env->pc != tb->pc) {
2221 env->pc -= 2;
2222 cpu_neg(cpu)->icount_decr.u16.low++;
2223 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2224 n = 2;
2225 }
2226 #endif
2227
2228 /* Generate a new TB executing the I/O insn. */
2229 cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2230
2231 if (tb_cflags(tb) & CF_NOCACHE) {
2232 if (tb->orig_tb) {
2233 /* Invalidate original TB if this TB was generated in
2234 * cpu_exec_nocache() */
2235 tb_phys_invalidate(tb->orig_tb, -1);
2236 }
2237 tcg_tb_remove(tb);
2238 }
2239
2240 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2241 * the first in the TB) then we end up generating a whole new TB and
2242 * repeating the fault, which is horribly inefficient.
2243 * Better would be to execute just this insn uncached, or generate a
2244 * second new TB.
2245 */
2246 cpu_loop_exit_noexc(cpu);
2247 }
2248
2249 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2250 {
2251 unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2252
2253 for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2254 atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2255 }
2256 }
2257
2258 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2259 {
2260 /* Discard jump cache entries for any tb which might overlap the
2261 flushed page; a TB starting on the previous page may extend into it. */
2262 tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2263 tb_jmp_cache_clear_page(cpu, addr);
2264 }
2265
2266 static void print_qht_statistics(struct qht_stats hst)
2267 {
2268 uint32_t hgram_opts;
2269 size_t hgram_bins;
2270 char *hgram;
2271
2272 if (!hst.head_buckets) {
2273 return;
2274 }
2275 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2276 hst.used_head_buckets, hst.head_buckets,
2277 (double)hst.used_head_buckets / hst.head_buckets * 100);
2278
2279 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2280 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
2281 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2282 hgram_opts |= QDIST_PR_NODECIMAL;
2283 }
2284 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2285 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2286 qdist_avg(&hst.occupancy) * 100, hgram);
2287 g_free(hgram);
2288
2289 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2290 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2291 if (hgram_bins > 10) {
2292 hgram_bins = 10;
2293 } else {
2294 hgram_bins = 0;
2295 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2296 }
2297 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2298 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
2299 qdist_avg(&hst.chain), hgram);
2300 g_free(hgram);
2301 }
2302
2303 struct tb_tree_stats {
2304 size_t nb_tbs;
2305 size_t host_size;
2306 size_t target_size;
2307 size_t max_target_size;
2308 size_t direct_jmp_count;
2309 size_t direct_jmp2_count;
2310 size_t cross_page;
2311 };
2312
2313 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2314 {
2315 const TranslationBlock *tb = value;
2316 struct tb_tree_stats *tst = data;
2317
2318 tst->nb_tbs++;
2319 tst->host_size += tb->tc.size;
2320 tst->target_size += tb->size;
2321 if (tb->size > tst->max_target_size) {
2322 tst->max_target_size = tb->size;
2323 }
2324 if (tb->page_addr[1] != -1) {
2325 tst->cross_page++;
2326 }
2327 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2328 tst->direct_jmp_count++;
2329 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2330 tst->direct_jmp2_count++;
2331 }
2332 }
2333 return false;
2334 }
2335
2336 void dump_exec_info(void)
2337 {
2338 struct tb_tree_stats tst = {};
2339 struct qht_stats hst;
2340 size_t nb_tbs, flush_full, flush_part, flush_elide;
2341
2342 tcg_tb_foreach(tb_tree_stats_iter, &tst);
2343 nb_tbs = tst.nb_tbs;
2344 /* XXX: avoid using doubles? */
2345 qemu_printf("Translation buffer state:\n");
2346 /*
2347 * Report total code size including the padding and TB structs;
2348 * otherwise users might think "-tb-size" is not honoured.
2349 * For avg host size we use the precise numbers from tb_tree_stats though.
2350 */
2351 qemu_printf("gen code size %zu/%zu\n",
2352 tcg_code_size(), tcg_code_capacity());
2353 qemu_printf("TB count %zu\n", nb_tbs);
2354 qemu_printf("TB avg target size %zu max=%zu bytes\n",
2355 nb_tbs ? tst.target_size / nb_tbs : 0,
2356 tst.max_target_size);
2357 qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
2358 nb_tbs ? tst.host_size / nb_tbs : 0,
2359 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2360 qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2361 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2362 qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2363 tst.direct_jmp_count,
2364 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2365 tst.direct_jmp2_count,
2366 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2367
2368 qht_statistics_init(&tb_ctx.htable, &hst);
2369 print_qht_statistics(hst);
2370 qht_statistics_destroy(&hst);
2371
2372 qemu_printf("\nStatistics:\n");
2373 qemu_printf("TB flush count %u\n",
2374 atomic_read(&tb_ctx.tb_flush_count));
2375 qemu_printf("TB invalidate count %zu\n",
2376 tcg_tb_phys_invalidate_count());
2377
2378 tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2379 qemu_printf("TLB full flushes %zu\n", flush_full);
2380 qemu_printf("TLB partial flushes %zu\n", flush_part);
2381 qemu_printf("TLB elided flushes %zu\n", flush_elide);
2382 tcg_dump_info();
2383 }
2384
2385 void dump_opcount_info(void)
2386 {
2387 tcg_dump_op_count();
2388 }
2389
2390 #else /* CONFIG_USER_ONLY */
2391
2392 void cpu_interrupt(CPUState *cpu, int mask)
2393 {
2394 g_assert(qemu_mutex_iothread_locked());
2395 cpu->interrupt_request |= mask;
2396 atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2397 }
2398
2399 /*
2400 * Walks guest process memory "regions" one by one
2401 * and calls callback function 'fn' for each region.
2402 */
2403 struct walk_memory_regions_data {
2404 walk_memory_regions_fn fn;
2405 void *priv;
2406 target_ulong start;
2407 int prot;
2408 };
2409
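/* Flush the region accumulated so far (if any) to 'fn', then start a
   new region at 'end' with protection 'new_prot'; 0 means no region. */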
2410 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2411 target_ulong end, int new_prot)
2412 {
2413 if (data->start != -1u) {
2414 int rc = data->fn(data->priv, data->start, end, data->prot);
2415 if (rc != 0) {
2416 return rc;
2417 }
2418 }
2419
2420 data->start = (new_prot ? end : -1u);
2421 data->prot = new_prot;
2422
2423 return 0;
2424 }
2425
2426 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2427 target_ulong base, int level, void **lp)
2428 {
2429 target_ulong pa;
2430 int i, rc;
2431
2432 if (*lp == NULL) {
2433 return walk_memory_regions_end(data, base, 0);
2434 }
2435
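/* level 0 entries hold PageDesc arrays; higher levels hold pointers to
   the next level down, each level covering V_L2_BITS more address bits */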
2436 if (level == 0) {
2437 PageDesc *pd = *lp;
2438
2439 for (i = 0; i < V_L2_SIZE; ++i) {
2440 int prot = pd[i].flags;
2441
2442 pa = base | (i << TARGET_PAGE_BITS);
2443 if (prot != data->prot) {
2444 rc = walk_memory_regions_end(data, pa, prot);
2445 if (rc != 0) {
2446 return rc;
2447 }
2448 }
2449 }
2450 } else {
2451 void **pp = *lp;
2452
2453 for (i = 0; i < V_L2_SIZE; ++i) {
2454 pa = base | ((target_ulong)i <<
2455 (TARGET_PAGE_BITS + V_L2_BITS * level));
2456 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2457 if (rc != 0) {
2458 return rc;
2459 }
2460 }
2461 }
2462
2463 return 0;
2464 }
2465
2466 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2467 {
2468 struct walk_memory_regions_data data;
2469 uintptr_t i, l1_sz = v_l1_size;
2470
2471 data.fn = fn;
2472 data.priv = priv;
2473 data.start = -1u;
2474 data.prot = 0;
2475
2476 for (i = 0; i < l1_sz; i++) {
2477 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2478 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2479 if (rc != 0) {
2480 return rc;
2481 }
2482 }
2483
2484 return walk_memory_regions_end(&data, 0, 0);
2485 }
2486
2487 static int dump_region(void *priv, target_ulong start,
2488 target_ulong end, unsigned long prot)
2489 {
2490 FILE *f = (FILE *)priv;
2491
2492 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2493 " "TARGET_FMT_lx" %c%c%c\n",
2494 start, end, end - start,
2495 ((prot & PAGE_READ) ? 'r' : '-'),
2496 ((prot & PAGE_WRITE) ? 'w' : '-'),
2497 ((prot & PAGE_EXEC) ? 'x' : '-'));
2498
2499 return 0;
2500 }
2501
2502 /* dump memory mappings */
2503 void page_dump(FILE *f)
2504 {
2505 const int length = sizeof(target_ulong) * 2;
2506 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2507 length, "start", length, "end", length, "size", "prot");
2508 walk_memory_regions(f, dump_region);
2509 }
2510
2511 int page_get_flags(target_ulong address)
2512 {
2513 PageDesc *p;
2514
2515 p = page_find(address >> TARGET_PAGE_BITS);
2516 if (!p) {
2517 return 0;
2518 }
2519 return p->flags;
2520 }
2521
2522 /* Modify the flags of a page and invalidate the code if necessary.
2523 The flag PAGE_WRITE_ORG is set automatically depending
2524 on PAGE_WRITE. The mmap_lock should already be held. */
2525 void page_set_flags(target_ulong start, target_ulong end, int flags)
2526 {
2527 target_ulong addr, len;
2528
2529 /* This function should never be called with addresses outside the
2530 guest address space. If this assert fires, it probably indicates
2531 a missing call to h2g_valid. */
2532 assert(end - 1 <= GUEST_ADDR_MAX);
2533 assert(start < end);
2534 assert_memory_lock();
2535
2536 start = start & TARGET_PAGE_MASK;
2537 end = TARGET_PAGE_ALIGN(end);
2538
2539 if (flags & PAGE_WRITE) {
2540 flags |= PAGE_WRITE_ORG;
2541 }
2542
2543 for (addr = start, len = end - start;
2544 len != 0;
2545 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2546 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2547
2548 /* If a write-protected page is about to become writable,
2549 invalidate the translated code it contains. */
2550 if (!(p->flags & PAGE_WRITE) &&
2551 (flags & PAGE_WRITE) &&
2552 p->first_tb) {
2553 tb_invalidate_phys_page(addr, 0);
2554 }
2555 p->flags = flags;
2556 }
2557 }
2558
2559 int page_check_range(target_ulong start, target_ulong len, int flags)
2560 {
2561 PageDesc *p;
2562 target_ulong end;
2563 target_ulong addr;
2564
2565 /* This function should never be called with addresses outside the
2566 guest address space. If this assert fires, it probably indicates
2567 a missing call to h2g_valid. */
2568 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2569 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2570 #endif
2571
2572 if (len == 0) {
2573 return 0;
2574 }
2575 if (start + len - 1 < start) {
2576 /* We've wrapped around. */
2577 return -1;
2578 }
2579
2580 /* must do this before we lose bits in the next step */
2581 end = TARGET_PAGE_ALIGN(start + len);
2582 start = start & TARGET_PAGE_MASK;
2583
2584 for (addr = start, len = end - start;
2585 len != 0;
2586 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2587 p = page_find(addr >> TARGET_PAGE_BITS);
2588 if (!p) {
2589 return -1;
2590 }
2591 if (!(p->flags & PAGE_VALID)) {
2592 return -1;
2593 }
2594
2595 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2596 return -1;
2597 }
2598 if (flags & PAGE_WRITE) {
2599 if (!(p->flags & PAGE_WRITE_ORG)) {
2600 return -1;
2601 }
2602 /* unprotect the page if it was made read-only because it
2603 contains translated code */
2604 if (!(p->flags & PAGE_WRITE)) {
2605 if (!page_unprotect(addr, 0)) {
2606 return -1;
2607 }
2608 }
2609 }
2610 }
2611 return 0;
2612 }
2613
2614 /* called from signal handler: invalidate the code and unprotect the
2615 * page. Return 0 if the fault was not handled, 1 if it was handled,
2616 * and 2 if it was handled but the caller must cause the TB to be
2617 * immediately exited. (We can only return 2 if the 'pc' argument is
2618 * non-zero.)
2619 */
2620 int page_unprotect(target_ulong address, uintptr_t pc)
2621 {
2622 unsigned int prot;
2623 bool current_tb_invalidated;
2624 PageDesc *p;
2625 target_ulong host_start, host_end, addr;
2626
2627 /* Technically this isn't safe inside a signal handler. However we
2628 know this only ever happens in a synchronous SEGV handler, so in
2629 practice it seems to be ok. */
2630 mmap_lock();
2631
2632 p = page_find(address >> TARGET_PAGE_BITS);
2633 if (!p) {
2634 mmap_unlock();
2635 return 0;
2636 }
2637
2638 /* if the page was really writable, then we change its
2639 protection back to writable */
2640 if (p->flags & PAGE_WRITE_ORG) {
2641 current_tb_invalidated = false;
2642 if (p->flags & PAGE_WRITE) {
2643 /* If the page is actually marked WRITE then assume this is because
2644 * this thread raced with another one which got here first and
2645 * set the page to PAGE_WRITE and did the TB invalidate for us.
2646 */
2647 #ifdef TARGET_HAS_PRECISE_SMC
2648 TranslationBlock *current_tb = tcg_tb_lookup(pc);
2649 if (current_tb) {
2650 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2651 }
2652 #endif
2653 } else {
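/* a host page may span several target pages: make every target page
   in it writable and invalidate any code they contain before restoring
   the host page protection below */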
2654 host_start = address & qemu_host_page_mask;
2655 host_end = host_start + qemu_host_page_size;
2656
2657 prot = 0;
2658 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2659 p = page_find(addr >> TARGET_PAGE_BITS);
2660 p->flags |= PAGE_WRITE;
2661 prot |= p->flags;
2662
2663 /* and since the content will be modified, we must invalidate
2664 the corresponding translated code. */
2665 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2666 #ifdef CONFIG_USER_ONLY
2667 if (DEBUG_TB_CHECK_GATE) {
2668 tb_invalidate_check(addr);
2669 }
2670 #endif
2671 }
2672 mprotect((void *)g2h(host_start), qemu_host_page_size,
2673 prot & PAGE_BITS);
2674 }
2675 mmap_unlock();
2676 /* If the current TB was invalidated, return to the main loop */
2677 return current_tb_invalidated ? 2 : 1;
2678 }
2679 mmap_unlock();
2680 return 0;
2681 }
2682 #endif /* CONFIG_USER_ONLY */
2683
2684 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2685 void tcg_flush_softmmu_tlb(CPUState *cs)
2686 {
2687 #ifdef CONFIG_SOFTMMU
2688 tlb_flush(cs);
2689 #endif
2690 }