[mirror_qemu.git] / accel / tcg / translate-all.c
1 /*
2 * Host code generation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu-common.h"
22
23 #define NO_CPU_IO_DEFS
24 #include "trace.h"
25 #include "disas/disas.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg.h"
28 #if defined(CONFIG_USER_ONLY)
29 #include "qemu.h"
30 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
31 #include <sys/param.h>
32 #if __FreeBSD_version >= 700104
33 #define HAVE_KINFO_GETVMMAP
34 #define sigqueue sigqueue_freebsd /* avoid redefinition */
35 #include <sys/proc.h>
36 #include <machine/profile.h>
37 #define _KERNEL
38 #include <sys/user.h>
39 #undef _KERNEL
40 #undef sigqueue
41 #include <libutil.h>
42 #endif
43 #endif
44 #else
45 #include "exec/ram_addr.h"
46 #endif
47
48 #include "exec/cputlb.h"
49 #include "exec/translate-all.h"
50 #include "qemu/bitmap.h"
51 #include "qemu/qemu-print.h"
52 #include "qemu/timer.h"
53 #include "qemu/main-loop.h"
54 #include "exec/log.h"
55 #include "sysemu/cpus.h"
56 #include "sysemu/cpu-timers.h"
57 #include "sysemu/tcg.h"
58 #include "qapi/error.h"
59 #include "hw/core/tcg-cpu-ops.h"
60 #include "tb-hash.h"
61 #include "tb-context.h"
62 #include "internal.h"
63
64 /* #define DEBUG_TB_INVALIDATE */
65 /* #define DEBUG_TB_FLUSH */
66 /* make various TB consistency checks */
67 /* #define DEBUG_TB_CHECK */
68
69 #ifdef DEBUG_TB_INVALIDATE
70 #define DEBUG_TB_INVALIDATE_GATE 1
71 #else
72 #define DEBUG_TB_INVALIDATE_GATE 0
73 #endif
74
75 #ifdef DEBUG_TB_FLUSH
76 #define DEBUG_TB_FLUSH_GATE 1
77 #else
78 #define DEBUG_TB_FLUSH_GATE 0
79 #endif
80
81 #if !defined(CONFIG_USER_ONLY)
82 /* TB consistency checks only implemented for usermode emulation. */
83 #undef DEBUG_TB_CHECK
84 #endif
85
86 #ifdef DEBUG_TB_CHECK
87 #define DEBUG_TB_CHECK_GATE 1
88 #else
89 #define DEBUG_TB_CHECK_GATE 0
90 #endif
91
92 /* Access to the various translation structures needs to be serialised via locks
93 * for consistency.
94 * In user-mode emulation, access to the memory-related structures is protected
95 * by the mmap_lock.
96 * In !user-mode we use per-page locks.
97 */
98 #ifdef CONFIG_SOFTMMU
99 #define assert_memory_lock()
100 #else
101 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
102 #endif
103
104 #define SMC_BITMAP_USE_THRESHOLD 10
105
106 typedef struct PageDesc {
107 /* list of TBs intersecting this ram page */
108 uintptr_t first_tb;
109 #ifdef CONFIG_SOFTMMU
110 /* in order to optimize self-modifying code, we count the number
111    of code writes to a given page before switching to a bitmap */
112 unsigned long *code_bitmap;
113 unsigned int code_write_count;
114 #else
115 unsigned long flags;
116 void *target_data;
117 #endif
118 #ifndef CONFIG_USER_ONLY
119 QemuSpin lock;
120 #endif
121 } PageDesc;
122
123 /**
124 * struct page_entry - page descriptor entry
125 * @pd: pointer to the &struct PageDesc of the page this entry represents
126 * @index: page index of the page
127 * @locked: whether the page is locked
128 *
129 * This struct helps us keep track of the locked state of a page, without
130 * bloating &struct PageDesc.
131 *
132 * A page lock protects accesses to all fields of &struct PageDesc.
133 *
134 * See also: &struct page_collection.
135 */
136 struct page_entry {
137 PageDesc *pd;
138 tb_page_addr_t index;
139 bool locked;
140 };
141
142 /**
143 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
144 * @tree: Binary search tree (BST) of the pages, with key == page index
145 * @max: Pointer to the page in @tree with the highest page index
146 *
147 * To avoid deadlock we lock pages in ascending order of page index.
148 * When operating on a set of pages, we need to keep track of them so that
149 * we can lock them in order and also unlock them later. For this we collect
150 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
151 * @tree implementation we use does not provide an O(1) operation to obtain the
152 * highest-ranked element, we use @max to keep track of the inserted page
153 * with the highest index. This is valuable because if a page is not in
154 * the tree and its index is higher than @max's, then we can lock it
155 * without breaking the locking order rule.
156 *
157 * Note on naming: 'struct page_set' would be shorter, but we already have a few
158 * page_set_*() helpers, so page_collection is used instead to avoid confusion.
159 *
160 * See also: page_collection_lock().
161 */
162 struct page_collection {
163 GTree *tree;
164 struct page_entry *max;
165 };
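/*
 * Editor's note -- illustrative usage sketch, not part of the original file.
 * A collection is built and torn down around one bulk invalidation, e.g.:
 *
 *     struct page_collection *pages;
 *
 *     pages = page_collection_lock(start, end);
 *     ...operate on the TBs of every page in [start, end[...
 *     page_collection_unlock(pages);
 *
 * which is how tb_invalidate_phys_range() uses it further down in this file.
 */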
166
167 /* list iterators for lists of tagged pointers in TranslationBlock */
168 #define TB_FOR_EACH_TAGGED(head, tb, n, field) \
169 for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
170 tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
171 tb = (TranslationBlock *)((uintptr_t)tb & ~1))
172
173 #define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
174 TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
175
176 #define TB_FOR_EACH_JMP(head_tb, tb, n) \
177 TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
178
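/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The low bit of each tagged pointer records whether the TB was linked
 * through slot 0 or slot 1 of page_next[]/jmp_list_next[], so a single
 * intrusive list can thread TBs that span two pages.  A typical walk:
 *
 *     TranslationBlock *tb;
 *     int n;
 *
 *     assert_page_locked(pd);
 *     PAGE_FOR_EACH_TB(pd, tb, n) {
 *         // 'n' selects which tb->page_addr[n] refers to this page
 *     }
 */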
179 /*
180 * In system mode we want L1_MAP to be based on ram offsets,
181 * while in user mode we want it to be based on virtual addresses.
182 *
183 * TODO: For user mode, see the caveat re host vs guest virtual
184 * address spaces near GUEST_ADDR_MAX.
185 */
186 #if !defined(CONFIG_USER_ONLY)
187 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
188 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
189 #else
190 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
191 #endif
192 #else
193 # define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
194 #endif
195
196 /* Size of the L2 (and L3, etc) page tables. */
197 #define V_L2_BITS 10
198 #define V_L2_SIZE (1 << V_L2_BITS)
199
200 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
201 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
202 sizeof_field(TranslationBlock, trace_vcpu_dstate)
203 * BITS_PER_BYTE);
204
205 /*
206 * L1 Mapping properties
207 */
208 static int v_l1_size;
209 static int v_l1_shift;
210 static int v_l2_levels;
211
212 /* The bottom level has pointers to PageDesc, and is indexed by
213 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
214 */
215 #define V_L1_MIN_BITS 4
216 #define V_L1_MAX_BITS (V_L2_BITS + 3)
217 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
218
219 static void *l1_map[V_L1_MAX_SIZE];
220
221 /* code generation context */
222 TCGContext tcg_init_ctx;
223 __thread TCGContext *tcg_ctx;
224 TBContext tb_ctx;
225
226 static void page_table_config_init(void)
227 {
228 uint32_t v_l1_bits;
229
230 assert(TARGET_PAGE_BITS);
231 /* The bits remaining after N lower levels of page tables. */
232 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
233 if (v_l1_bits < V_L1_MIN_BITS) {
234 v_l1_bits += V_L2_BITS;
235 }
236
237 v_l1_size = 1 << v_l1_bits;
238 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
239 v_l2_levels = v_l1_shift / V_L2_BITS - 1;
240
241 assert(v_l1_bits <= V_L1_MAX_BITS);
242 assert(v_l1_shift % V_L2_BITS == 0);
243 assert(v_l2_levels >= 0);
244 }
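/*
 * Editor's note -- worked example, not part of the original file.  Assuming a
 * 64-bit address space (L1_MAP_ADDR_SPACE_BITS == 64) and 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), the code above computes:
 *
 *     v_l1_bits   = (64 - 12) % 10 = 2   -> below V_L1_MIN_BITS, so 2 + 10 = 12
 *     v_l1_size   = 1 << 12 = 4096 l1_map entries
 *     v_l1_shift  = 64 - 12 - 12 = 40
 *     v_l2_levels = 40 / 10 - 1 = 3
 *
 * i.e. a radix tree with a 4096-entry L1 table, three 1024-entry intermediate
 * levels, and a final 1024-entry leaf array of PageDesc.
 */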
245
246 /* Encode VAL as a signed leb128 sequence at P.
247 Return P incremented past the encoded value. */
248 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
249 {
250 int more, byte;
251
252 do {
253 byte = val & 0x7f;
254 val >>= 7;
255 more = !((val == 0 && (byte & 0x40) == 0)
256 || (val == -1 && (byte & 0x40) != 0));
257 if (more) {
258 byte |= 0x80;
259 }
260 *p++ = byte;
261 } while (more);
262
263 return p;
264 }
265
266 /* Decode a signed leb128 sequence at *PP; increment *PP past the
267 decoded value. Return the decoded value. */
268 static target_long decode_sleb128(const uint8_t **pp)
269 {
270 const uint8_t *p = *pp;
271 target_long val = 0;
272 int byte, shift = 0;
273
274 do {
275 byte = *p++;
276 val |= (target_ulong)(byte & 0x7f) << shift;
277 shift += 7;
278 } while (byte & 0x80);
279 if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
280 val |= -(target_ulong)1 << shift;
281 }
282
283 *pp = p;
284 return val;
285 }
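/*
 * Editor's note -- illustrative byte-level examples, not part of the original
 * file.  Deltas are typically tiny, so most values take a single byte:
 *
 *     encode_sleb128(p, 4)    ->  0x04
 *     encode_sleb128(p, -5)   ->  0x7b        (bit 6 set, so decode_sleb128
 *                                              sign-extends back to -5)
 *     encode_sleb128(p, 300)  ->  0xac 0x02   (300 = 0x2c | (0x02 << 7); the
 *                                              0x80 bit marks "more bytes")
 */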
286
287 /* Encode the data collected about the instructions while compiling TB.
288 Place the data at BLOCK, and return the number of bytes consumed.
289
290 The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
291 which come from the target's insn_start data, followed by a uintptr_t
292 which comes from the host pc of the end of the code implementing the insn.
293
294 Each line of the table is encoded as sleb128 deltas from the previous
295 line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
296 That is, the first column is seeded with the guest pc, the last column
297 with the host pc, and the middle columns with zeros. */
298
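/*
 * Editor's note -- hypothetical example, not part of the original file.  For
 * a TB starting at guest pc 0x1000 with two 4-byte insns whose generated code
 * ends at host offsets 0x30 and 0x54 (and TARGET_INSN_START_WORDS == 1), the
 * search data built by encode_search() below stores the deltas
 *
 *     row 0:  0x1000 - 0x1000 = 0      0x30 - 0    = 0x30
 *     row 1:  0x1004 - 0x1000 = 4      0x54 - 0x30 = 0x24
 *
 * and cpu_restore_state_from_tb() replays them, accumulating onto the seed
 * { tb->pc, tb->tc.ptr }, until the host pc column passes searched_pc.
 */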
299 static int encode_search(TranslationBlock *tb, uint8_t *block)
300 {
301 uint8_t *highwater = tcg_ctx->code_gen_highwater;
302 uint8_t *p = block;
303 int i, j, n;
304
305 for (i = 0, n = tb->icount; i < n; ++i) {
306 target_ulong prev;
307
308 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
309 if (i == 0) {
310 prev = (j == 0 ? tb->pc : 0);
311 } else {
312 prev = tcg_ctx->gen_insn_data[i - 1][j];
313 }
314 p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
315 }
316 prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
317 p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
318
319 /* Test for (pending) buffer overflow. The assumption is that any
320 one row beginning below the high water mark cannot overrun
321 the buffer completely. Thus we can test for overflow after
322 encoding a row without having to check during encoding. */
323 if (unlikely(p > highwater)) {
324 return -1;
325 }
326 }
327
328 return p - block;
329 }
330
331 /* The cpu state corresponding to 'searched_pc' is restored.
332 * When reset_icount is true, the current TB will be interrupted and
333 * icount should be recalculated.
334 */
335 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
336 uintptr_t searched_pc, bool reset_icount)
337 {
338 target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
339 uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
340 CPUArchState *env = cpu->env_ptr;
341 const uint8_t *p = tb->tc.ptr + tb->tc.size;
342 int i, j, num_insns = tb->icount;
343 #ifdef CONFIG_PROFILER
344 TCGProfile *prof = &tcg_ctx->prof;
345 int64_t ti = profile_getclock();
346 #endif
347
348 searched_pc -= GETPC_ADJ;
349
350 if (searched_pc < host_pc) {
351 return -1;
352 }
353
354 /* Reconstruct the stored insn data while looking for the point at
355 which the end of the insn exceeds the searched_pc. */
356 for (i = 0; i < num_insns; ++i) {
357 for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
358 data[j] += decode_sleb128(&p);
359 }
360 host_pc += decode_sleb128(&p);
361 if (host_pc > searched_pc) {
362 goto found;
363 }
364 }
365 return -1;
366
367 found:
368 if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
369 assert(icount_enabled());
370 /* Reset the cycle counter to the start of the block
371 and shift it to the number of actually executed instructions */
372 cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
373 }
374 restore_state_to_opc(env, tb, data);
375
376 #ifdef CONFIG_PROFILER
377 qatomic_set(&prof->restore_time,
378 prof->restore_time + profile_getclock() - ti);
379 qatomic_set(&prof->restore_count, prof->restore_count + 1);
380 #endif
381 return 0;
382 }
383
384 void tb_destroy(TranslationBlock *tb)
385 {
386 qemu_spin_destroy(&tb->jmp_lock);
387 }
388
389 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
390 {
391 /*
392 * The host_pc has to be in the rx region of the code buffer.
393 * If it is not we will not be able to resolve it here.
394 * The two cases where host_pc will not be correct are:
395 *
396 * - fault during translation (instruction fetch)
397 * - fault from helper (not using GETPC() macro)
398 *
399 * Either way we need to return early as we can't resolve it here.
400 */
401 if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
402 TranslationBlock *tb = tcg_tb_lookup(host_pc);
403 if (tb) {
404 cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
405 return true;
406 }
407 }
408 return false;
409 }
410
411 void page_init(void)
412 {
413 page_size_init();
414 page_table_config_init();
415
416 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
417 {
418 #ifdef HAVE_KINFO_GETVMMAP
419 struct kinfo_vmentry *freep;
420 int i, cnt;
421
422 freep = kinfo_getvmmap(getpid(), &cnt);
423 if (freep) {
424 mmap_lock();
425 for (i = 0; i < cnt; i++) {
426 unsigned long startaddr, endaddr;
427
428 startaddr = freep[i].kve_start;
429 endaddr = freep[i].kve_end;
430 if (h2g_valid(startaddr)) {
431 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
432
433 if (h2g_valid(endaddr)) {
434 endaddr = h2g(endaddr);
435 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
436 } else {
437 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
438 endaddr = ~0ul;
439 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
440 #endif
441 }
442 }
443 }
444 free(freep);
445 mmap_unlock();
446 }
447 #else
448 FILE *f;
449
450 last_brk = (unsigned long)sbrk(0);
451
452 f = fopen("/compat/linux/proc/self/maps", "r");
453 if (f) {
454 mmap_lock();
455
456 do {
457 unsigned long startaddr, endaddr;
458 int n;
459
460 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
461
462 if (n == 2 && h2g_valid(startaddr)) {
463 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
464
465 if (h2g_valid(endaddr)) {
466 endaddr = h2g(endaddr);
467 } else {
468 endaddr = ~0ul;
469 }
470 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
471 }
472 } while (!feof(f));
473
474 fclose(f);
475 mmap_unlock();
476 }
477 #endif
478 }
479 #endif
480 }
481
482 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
483 {
484 PageDesc *pd;
485 void **lp;
486 int i;
487
488 /* Level 1. Always allocated. */
489 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
490
491 /* Level 2..N-1. */
492 for (i = v_l2_levels; i > 0; i--) {
493 void **p = qatomic_rcu_read(lp);
494
495 if (p == NULL) {
496 void *existing;
497
498 if (!alloc) {
499 return NULL;
500 }
501 p = g_new0(void *, V_L2_SIZE);
502 existing = qatomic_cmpxchg(lp, NULL, p);
503 if (unlikely(existing)) {
504 g_free(p);
505 p = existing;
506 }
507 }
508
509 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
510 }
511
512 pd = qatomic_rcu_read(lp);
513 if (pd == NULL) {
514 void *existing;
515
516 if (!alloc) {
517 return NULL;
518 }
519 pd = g_new0(PageDesc, V_L2_SIZE);
520 #ifndef CONFIG_USER_ONLY
521 {
522 int i;
523
524 for (i = 0; i < V_L2_SIZE; i++) {
525 qemu_spin_init(&pd[i].lock);
526 }
527 }
528 #endif
529 existing = qatomic_cmpxchg(lp, NULL, pd);
530 if (unlikely(existing)) {
531 #ifndef CONFIG_USER_ONLY
532 {
533 int i;
534
535 for (i = 0; i < V_L2_SIZE; i++) {
536 qemu_spin_destroy(&pd[i].lock);
537 }
538 }
539 #endif
540 g_free(pd);
541 pd = existing;
542 }
543 }
544
545 return pd + (index & (V_L2_SIZE - 1));
546 }
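/*
 * Editor's note, not part of the original file: page_find_alloc() publishes
 * each new level with qatomic_cmpxchg(), so plain lookups (alloc == 0) can
 * walk the radix tree without taking any lock, and two allocators racing on
 * the same slot resolve it by having the loser free its table and adopt the
 * winner's.
 */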
547
548 static inline PageDesc *page_find(tb_page_addr_t index)
549 {
550 return page_find_alloc(index, 0);
551 }
552
553 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
554 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
555
556 /* In user-mode page locks aren't used; mmap_lock is enough */
557 #ifdef CONFIG_USER_ONLY
558
559 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
560
561 static inline void page_lock(PageDesc *pd)
562 { }
563
564 static inline void page_unlock(PageDesc *pd)
565 { }
566
567 static inline void page_lock_tb(const TranslationBlock *tb)
568 { }
569
570 static inline void page_unlock_tb(const TranslationBlock *tb)
571 { }
572
573 struct page_collection *
574 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
575 {
576 return NULL;
577 }
578
579 void page_collection_unlock(struct page_collection *set)
580 { }
581 #else /* !CONFIG_USER_ONLY */
582
583 #ifdef CONFIG_DEBUG_TCG
584
585 static __thread GHashTable *ht_pages_locked_debug;
586
587 static void ht_pages_locked_debug_init(void)
588 {
589 if (ht_pages_locked_debug) {
590 return;
591 }
592 ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
593 }
594
595 static bool page_is_locked(const PageDesc *pd)
596 {
597 PageDesc *found;
598
599 ht_pages_locked_debug_init();
600 found = g_hash_table_lookup(ht_pages_locked_debug, pd);
601 return !!found;
602 }
603
604 static void page_lock__debug(PageDesc *pd)
605 {
606 ht_pages_locked_debug_init();
607 g_assert(!page_is_locked(pd));
608 g_hash_table_insert(ht_pages_locked_debug, pd, pd);
609 }
610
611 static void page_unlock__debug(const PageDesc *pd)
612 {
613 bool removed;
614
615 ht_pages_locked_debug_init();
616 g_assert(page_is_locked(pd));
617 removed = g_hash_table_remove(ht_pages_locked_debug, pd);
618 g_assert(removed);
619 }
620
621 static void
622 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
623 {
624 if (unlikely(!page_is_locked(pd))) {
625 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
626 pd, file, line);
627 abort();
628 }
629 }
630
631 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
632
633 void assert_no_pages_locked(void)
634 {
635 ht_pages_locked_debug_init();
636 g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
637 }
638
639 #else /* !CONFIG_DEBUG_TCG */
640
641 #define assert_page_locked(pd)
642
643 static inline void page_lock__debug(const PageDesc *pd)
644 {
645 }
646
647 static inline void page_unlock__debug(const PageDesc *pd)
648 {
649 }
650
651 #endif /* CONFIG_DEBUG_TCG */
652
653 static inline void page_lock(PageDesc *pd)
654 {
655 page_lock__debug(pd);
656 qemu_spin_lock(&pd->lock);
657 }
658
659 static inline void page_unlock(PageDesc *pd)
660 {
661 qemu_spin_unlock(&pd->lock);
662 page_unlock__debug(pd);
663 }
664
665 /* lock the page(s) of a TB in the correct acquisition order */
666 static inline void page_lock_tb(const TranslationBlock *tb)
667 {
668 page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
669 }
670
671 static inline void page_unlock_tb(const TranslationBlock *tb)
672 {
673 PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
674
675 page_unlock(p1);
676 if (unlikely(tb->page_addr[1] != -1)) {
677 PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
678
679 if (p2 != p1) {
680 page_unlock(p2);
681 }
682 }
683 }
684
685 static inline struct page_entry *
686 page_entry_new(PageDesc *pd, tb_page_addr_t index)
687 {
688 struct page_entry *pe = g_malloc(sizeof(*pe));
689
690 pe->index = index;
691 pe->pd = pd;
692 pe->locked = false;
693 return pe;
694 }
695
696 static void page_entry_destroy(gpointer p)
697 {
698 struct page_entry *pe = p;
699
700 g_assert(pe->locked);
701 page_unlock(pe->pd);
702 g_free(pe);
703 }
704
705 /* returns false on success */
706 static bool page_entry_trylock(struct page_entry *pe)
707 {
708 bool busy;
709
710 busy = qemu_spin_trylock(&pe->pd->lock);
711 if (!busy) {
712 g_assert(!pe->locked);
713 pe->locked = true;
714 page_lock__debug(pe->pd);
715 }
716 return busy;
717 }
718
719 static void do_page_entry_lock(struct page_entry *pe)
720 {
721 page_lock(pe->pd);
722 g_assert(!pe->locked);
723 pe->locked = true;
724 }
725
726 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
727 {
728 struct page_entry *pe = value;
729
730 do_page_entry_lock(pe);
731 return FALSE;
732 }
733
734 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
735 {
736 struct page_entry *pe = value;
737
738 if (pe->locked) {
739 pe->locked = false;
740 page_unlock(pe->pd);
741 }
742 return FALSE;
743 }
744
745 /*
746 * Trylock a page, and if successful, add the page to a collection.
747 * Returns true ("busy") if the page could not be locked; false otherwise.
748 */
749 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
750 {
751 tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
752 struct page_entry *pe;
753 PageDesc *pd;
754
755 pe = g_tree_lookup(set->tree, &index);
756 if (pe) {
757 return false;
758 }
759
760 pd = page_find(index);
761 if (pd == NULL) {
762 return false;
763 }
764
765 pe = page_entry_new(pd, index);
766 g_tree_insert(set->tree, &pe->index, pe);
767
768 /*
769 * If this is either (1) the first insertion or (2) a page whose index
770 * is higher than any other so far, just lock the page and move on.
771 */
772 if (set->max == NULL || pe->index > set->max->index) {
773 set->max = pe;
774 do_page_entry_lock(pe);
775 return false;
776 }
777 /*
778 * Try to acquire out-of-order lock; if busy, return busy so that we acquire
779 * locks in order.
780 */
781 return page_entry_trylock(pe);
782 }
783
784 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
785 {
786 tb_page_addr_t a = *(const tb_page_addr_t *)ap;
787 tb_page_addr_t b = *(const tb_page_addr_t *)bp;
788
789 if (a == b) {
790 return 0;
791 } else if (a < b) {
792 return -1;
793 }
794 return 1;
795 }
796
797 /*
798 * Lock a range of pages ([@start,@end[) as well as the pages of all
799 * intersecting TBs.
800 * Locking order: acquire locks in ascending order of page index.
801 */
802 struct page_collection *
803 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
804 {
805 struct page_collection *set = g_malloc(sizeof(*set));
806 tb_page_addr_t index;
807 PageDesc *pd;
808
809 start >>= TARGET_PAGE_BITS;
810 end >>= TARGET_PAGE_BITS;
811 g_assert(start <= end);
812
813 set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
814 page_entry_destroy);
815 set->max = NULL;
816 assert_no_pages_locked();
817
818 retry:
819 g_tree_foreach(set->tree, page_entry_lock, NULL);
820
821 for (index = start; index <= end; index++) {
822 TranslationBlock *tb;
823 int n;
824
825 pd = page_find(index);
826 if (pd == NULL) {
827 continue;
828 }
829 if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
830 g_tree_foreach(set->tree, page_entry_unlock, NULL);
831 goto retry;
832 }
833 assert_page_locked(pd);
834 PAGE_FOR_EACH_TB(pd, tb, n) {
835 if (page_trylock_add(set, tb->page_addr[0]) ||
836 (tb->page_addr[1] != -1 &&
837 page_trylock_add(set, tb->page_addr[1]))) {
838 /* drop all locks, and reacquire in order */
839 g_tree_foreach(set->tree, page_entry_unlock, NULL);
840 goto retry;
841 }
842 }
843 }
844 return set;
845 }
846
847 void page_collection_unlock(struct page_collection *set)
848 {
849 /* entries are unlocked and freed via page_entry_destroy */
850 g_tree_destroy(set->tree);
851 g_free(set);
852 }
853
854 #endif /* !CONFIG_USER_ONLY */
855
856 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
857 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
858 {
859 PageDesc *p1, *p2;
860 tb_page_addr_t page1;
861 tb_page_addr_t page2;
862
863 assert_memory_lock();
864 g_assert(phys1 != -1);
865
866 page1 = phys1 >> TARGET_PAGE_BITS;
867 page2 = phys2 >> TARGET_PAGE_BITS;
868
869 p1 = page_find_alloc(page1, alloc);
870 if (ret_p1) {
871 *ret_p1 = p1;
872 }
873 if (likely(phys2 == -1)) {
874 page_lock(p1);
875 return;
876 } else if (page1 == page2) {
877 page_lock(p1);
878 if (ret_p2) {
879 *ret_p2 = p1;
880 }
881 return;
882 }
883 p2 = page_find_alloc(page2, alloc);
884 if (ret_p2) {
885 *ret_p2 = p2;
886 }
887 if (page1 < page2) {
888 page_lock(p1);
889 page_lock(p2);
890 } else {
891 page_lock(p2);
892 page_lock(p1);
893 }
894 }
895
896 static bool tb_cmp(const void *ap, const void *bp)
897 {
898 const TranslationBlock *a = ap;
899 const TranslationBlock *b = bp;
900
901 return a->pc == b->pc &&
902 a->cs_base == b->cs_base &&
903 a->flags == b->flags &&
904 (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
905 a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
906 a->page_addr[0] == b->page_addr[0] &&
907 a->page_addr[1] == b->page_addr[1];
908 }
909
910 void tb_htable_init(void)
911 {
912 unsigned int mode = QHT_MODE_AUTO_RESIZE;
913
914 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
915 }
916
917 /* call with @p->lock held */
918 static inline void invalidate_page_bitmap(PageDesc *p)
919 {
920 assert_page_locked(p);
921 #ifdef CONFIG_SOFTMMU
922 g_free(p->code_bitmap);
923 p->code_bitmap = NULL;
924 p->code_write_count = 0;
925 #endif
926 }
927
928 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
929 static void page_flush_tb_1(int level, void **lp)
930 {
931 int i;
932
933 if (*lp == NULL) {
934 return;
935 }
936 if (level == 0) {
937 PageDesc *pd = *lp;
938
939 for (i = 0; i < V_L2_SIZE; ++i) {
940 page_lock(&pd[i]);
941 pd[i].first_tb = (uintptr_t)NULL;
942 invalidate_page_bitmap(pd + i);
943 page_unlock(&pd[i]);
944 }
945 } else {
946 void **pp = *lp;
947
948 for (i = 0; i < V_L2_SIZE; ++i) {
949 page_flush_tb_1(level - 1, pp + i);
950 }
951 }
952 }
953
954 static void page_flush_tb(void)
955 {
956 int i, l1_sz = v_l1_size;
957
958 for (i = 0; i < l1_sz; i++) {
959 page_flush_tb_1(v_l2_levels, l1_map + i);
960 }
961 }
962
963 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
964 {
965 const TranslationBlock *tb = value;
966 size_t *size = data;
967
968 *size += tb->tc.size;
969 return false;
970 }
971
972 /* flush all the translation blocks */
973 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
974 {
975 bool did_flush = false;
976
977 mmap_lock();
978 /* If it has already been done on request of another CPU,
979 * just retry.
980 */
981 if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
982 goto done;
983 }
984 did_flush = true;
985
986 if (DEBUG_TB_FLUSH_GATE) {
987 size_t nb_tbs = tcg_nb_tbs();
988 size_t host_size = 0;
989
990 tcg_tb_foreach(tb_host_size_iter, &host_size);
991 printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
992 tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
993 }
994
995 CPU_FOREACH(cpu) {
996 cpu_tb_jmp_cache_clear(cpu);
997 }
998
999 qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1000 page_flush_tb();
1001
1002 tcg_region_reset_all();
1003 /* XXX: flush processor icache at this point if cache flush is
1004 expensive */
1005 qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1006
1007 done:
1008 mmap_unlock();
1009 if (did_flush) {
1010 qemu_plugin_flush_cb();
1011 }
1012 }
1013
1014 void tb_flush(CPUState *cpu)
1015 {
1016 if (tcg_enabled()) {
1017 unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1018
1019 if (cpu_in_exclusive_context(cpu)) {
1020 do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1021 } else {
1022 async_safe_run_on_cpu(cpu, do_tb_flush,
1023 RUN_ON_CPU_HOST_INT(tb_flush_count));
1024 }
1025 }
1026 }
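/*
 * Editor's note -- illustrative trace, not part of the original file.  The
 * tb_flush_count snapshot makes racing flush requests idempotent:
 *
 *     vCPU A: reads tb_flush_count == 5, queues do_tb_flush(5)
 *     vCPU B: reads tb_flush_count == 5, queues do_tb_flush(5)
 *     first  do_tb_flush(5): count still 5 -> flush, count becomes 6
 *     second do_tb_flush(5): count is now 6 -> nothing left to do
 *
 * so the code cache is flushed exactly once per generation.
 */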
1027
1028 /*
1029 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1030 * so in order to prevent bit rot we compile them unconditionally in user-mode,
1031 * and let the optimizer get rid of them by wrapping their user-only callers
1032 * with if (DEBUG_TB_CHECK_GATE).
1033 */
1034 #ifdef CONFIG_USER_ONLY
1035
1036 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1037 {
1038 TranslationBlock *tb = p;
1039 target_ulong addr = *(target_ulong *)userp;
1040
1041 if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1042 printf("ERROR invalidate: address=" TARGET_FMT_lx
1043 " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1044 }
1045 }
1046
1047 /* verify that all the pages have correct rights for code
1048 *
1049 * Called with mmap_lock held.
1050 */
1051 static void tb_invalidate_check(target_ulong address)
1052 {
1053 address &= TARGET_PAGE_MASK;
1054 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1055 }
1056
1057 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1058 {
1059 TranslationBlock *tb = p;
1060 int flags1, flags2;
1061
1062 flags1 = page_get_flags(tb->pc);
1063 flags2 = page_get_flags(tb->pc + tb->size - 1);
1064 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1065 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1066 (long)tb->pc, tb->size, flags1, flags2);
1067 }
1068 }
1069
1070 /* verify that all the pages have correct rights for code */
1071 static void tb_page_check(void)
1072 {
1073 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1074 }
1075
1076 #endif /* CONFIG_USER_ONLY */
1077
1078 /*
1079 * user-mode: call with mmap_lock held
1080 * !user-mode: call with @pd->lock held
1081 */
1082 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1083 {
1084 TranslationBlock *tb1;
1085 uintptr_t *pprev;
1086 unsigned int n1;
1087
1088 assert_page_locked(pd);
1089 pprev = &pd->first_tb;
1090 PAGE_FOR_EACH_TB(pd, tb1, n1) {
1091 if (tb1 == tb) {
1092 *pprev = tb1->page_next[n1];
1093 return;
1094 }
1095 pprev = &tb1->page_next[n1];
1096 }
1097 g_assert_not_reached();
1098 }
1099
1100 /* remove @orig from its @n_orig-th jump list */
1101 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1102 {
1103 uintptr_t ptr, ptr_locked;
1104 TranslationBlock *dest;
1105 TranslationBlock *tb;
1106 uintptr_t *pprev;
1107 int n;
1108
1109 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1110 ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1111 dest = (TranslationBlock *)(ptr & ~1);
1112 if (dest == NULL) {
1113 return;
1114 }
1115
1116 qemu_spin_lock(&dest->jmp_lock);
1117 /*
1118 * While acquiring the lock, the jump might have been removed if the
1119 * destination TB was invalidated; check again.
1120 */
1121 ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1122 if (ptr_locked != ptr) {
1123 qemu_spin_unlock(&dest->jmp_lock);
1124 /*
1125 * The only possibility is that the jump was unlinked via
1126 * tb_jmp_unlink(dest). Seeing another destination here would be a bug,
1127 * because we set the LSB above.
1128 */
1129 g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1130 return;
1131 }
1132 /*
1133 * We first acquired the lock, and since the destination pointer matches,
1134 * we know for sure that @orig is in the jmp list.
1135 */
1136 pprev = &dest->jmp_list_head;
1137 TB_FOR_EACH_JMP(dest, tb, n) {
1138 if (tb == orig && n == n_orig) {
1139 *pprev = tb->jmp_list_next[n];
1140 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1141 qemu_spin_unlock(&dest->jmp_lock);
1142 return;
1143 }
1144 pprev = &tb->jmp_list_next[n];
1145 }
1146 g_assert_not_reached();
1147 }
1148
1149 /* reset the jump entry 'n' of a TB so that it is not chained to
1150 another TB */
1151 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1152 {
1153 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1154 tb_set_jmp_target(tb, n, addr);
1155 }
1156
1157 /* remove any jumps to the TB */
1158 static inline void tb_jmp_unlink(TranslationBlock *dest)
1159 {
1160 TranslationBlock *tb;
1161 int n;
1162
1163 qemu_spin_lock(&dest->jmp_lock);
1164
1165 TB_FOR_EACH_JMP(dest, tb, n) {
1166 tb_reset_jump(tb, n);
1167 qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1168 /* No need to clear the list entry; setting the dest ptr is enough */
1169 }
1170 dest->jmp_list_head = (uintptr_t)NULL;
1171
1172 qemu_spin_unlock(&dest->jmp_lock);
1173 }
1174
1175 /*
1176 * In user-mode, call with mmap_lock held.
1177 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1178 * locks held.
1179 */
1180 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1181 {
1182 CPUState *cpu;
1183 PageDesc *p;
1184 uint32_t h;
1185 tb_page_addr_t phys_pc;
1186 uint32_t orig_cflags = tb_cflags(tb);
1187
1188 assert_memory_lock();
1189
1190 /* make sure no further incoming jumps will be chained to this TB */
1191 qemu_spin_lock(&tb->jmp_lock);
1192 qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1193 qemu_spin_unlock(&tb->jmp_lock);
1194
1195 /* remove the TB from the hash list */
1196 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1197 h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
1198 tb->trace_vcpu_dstate);
1199 if (!qht_remove(&tb_ctx.htable, tb, h)) {
1200 return;
1201 }
1202
1203 /* remove the TB from the page list */
1204 if (rm_from_page_list) {
1205 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1206 tb_page_remove(p, tb);
1207 invalidate_page_bitmap(p);
1208 if (tb->page_addr[1] != -1) {
1209 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1210 tb_page_remove(p, tb);
1211 invalidate_page_bitmap(p);
1212 }
1213 }
1214
1215 /* remove the TB from the per-CPU jump caches */
1216 h = tb_jmp_cache_hash_func(tb->pc);
1217 CPU_FOREACH(cpu) {
1218 if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1219 qatomic_set(&cpu->tb_jmp_cache[h], NULL);
1220 }
1221 }
1222
1223 /* suppress this TB from the two jump lists */
1224 tb_remove_from_jmp_list(tb, 0);
1225 tb_remove_from_jmp_list(tb, 1);
1226
1227 /* suppress any remaining jumps to this TB */
1228 tb_jmp_unlink(tb);
1229
1230 qatomic_set(&tcg_ctx->tb_phys_invalidate_count,
1231 tcg_ctx->tb_phys_invalidate_count + 1);
1232 }
1233
1234 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1235 {
1236 qemu_thread_jit_write();
1237 do_tb_phys_invalidate(tb, true);
1238 qemu_thread_jit_execute();
1239 }
1240
1241 /* invalidate one TB
1242 *
1243 * Called with mmap_lock held in user-mode.
1244 */
1245 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1246 {
1247 if (page_addr == -1 && tb->page_addr[0] != -1) {
1248 page_lock_tb(tb);
1249 do_tb_phys_invalidate(tb, true);
1250 page_unlock_tb(tb);
1251 } else {
1252 do_tb_phys_invalidate(tb, false);
1253 }
1254 }
1255
1256 #ifdef CONFIG_SOFTMMU
1257 /* call with @p->lock held */
1258 static void build_page_bitmap(PageDesc *p)
1259 {
1260 int n, tb_start, tb_end;
1261 TranslationBlock *tb;
1262
1263 assert_page_locked(p);
1264 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1265
1266 PAGE_FOR_EACH_TB(p, tb, n) {
1267 /* NOTE: this is subtle as a TB may span two physical pages */
1268 if (n == 0) {
1269 /* NOTE: tb_end may be after the end of the page, but
1270 it is not a problem */
1271 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1272 tb_end = tb_start + tb->size;
1273 if (tb_end > TARGET_PAGE_SIZE) {
1274 tb_end = TARGET_PAGE_SIZE;
1275 }
1276 } else {
1277 tb_start = 0;
1278 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1279 }
1280 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1281 }
1282 }
1283 #endif
1284
1285 /* add the tb to the target page and protect it if necessary
1286 *
1287 * Called with mmap_lock held for user-mode emulation.
1288 * Called with @p->lock held in !user-mode.
1289 */
1290 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1291 unsigned int n, tb_page_addr_t page_addr)
1292 {
1293 #ifndef CONFIG_USER_ONLY
1294 bool page_already_protected;
1295 #endif
1296
1297 assert_page_locked(p);
1298
1299 tb->page_addr[n] = page_addr;
1300 tb->page_next[n] = p->first_tb;
1301 #ifndef CONFIG_USER_ONLY
1302 page_already_protected = p->first_tb != (uintptr_t)NULL;
1303 #endif
1304 p->first_tb = (uintptr_t)tb | n;
1305 invalidate_page_bitmap(p);
1306
1307 #if defined(CONFIG_USER_ONLY)
1308 if (p->flags & PAGE_WRITE) {
1309 target_ulong addr;
1310 PageDesc *p2;
1311 int prot;
1312
1313 /* force the host page as non writable (writes will have a
1314 page fault + mprotect overhead) */
1315 page_addr &= qemu_host_page_mask;
1316 prot = 0;
1317 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1318 addr += TARGET_PAGE_SIZE) {
1319
1320 p2 = page_find(addr >> TARGET_PAGE_BITS);
1321 if (!p2) {
1322 continue;
1323 }
1324 prot |= p2->flags;
1325 p2->flags &= ~PAGE_WRITE;
1326 }
1327 mprotect(g2h_untagged(page_addr), qemu_host_page_size,
1328 (prot & PAGE_BITS) & ~PAGE_WRITE);
1329 if (DEBUG_TB_INVALIDATE_GATE) {
1330 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1331 }
1332 }
1333 #else
1334 /* if some code is already present, then the pages are already
1335 protected. So we handle the case where only the first TB is
1336 allocated in a physical page */
1337 if (!page_already_protected) {
1338 tlb_protect_code(page_addr);
1339 }
1340 #endif
1341 }
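/*
 * Editor's note, not part of the original file: in user-mode the mprotect()
 * above is what catches self-modifying code -- a later guest store to the now
 * read-only page faults, and the fault path restores PAGE_WRITE and discards
 * the stale translations via tb_invalidate_phys_page() below.
 */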
1342
1343 /*
1344 * Add a new TB and link it to the physical page tables. phys_page2 is
1345 * (-1) to indicate that only one page contains the TB.
1346 *
1347 * Called with mmap_lock held for user-mode emulation.
1348 *
1349 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1350 * Note that in !user-mode, another thread might have already added a TB
1351 * for the same block of guest code that @tb corresponds to. In that case,
1352 * the caller should discard the original @tb, and use instead the returned TB.
1353 */
1354 static TranslationBlock *
1355 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1356 tb_page_addr_t phys_page2)
1357 {
1358 PageDesc *p;
1359 PageDesc *p2 = NULL;
1360 void *existing_tb = NULL;
1361 uint32_t h;
1362
1363 assert_memory_lock();
1364 tcg_debug_assert(!(tb->cflags & CF_INVALID));
1365
1366 /*
1367 * Add the TB to the page list, acquiring first the pages' locks.
1368 * We keep the locks held until after inserting the TB in the hash table,
1369 * so that if the insertion fails we know for sure that the TBs are still
1370 * in the page descriptors.
1371 * Note that inserting into the hash table first isn't an option, since
1372 * we can only insert TBs that are fully initialized.
1373 */
1374 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1375 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1376 if (p2) {
1377 tb_page_add(p2, tb, 1, phys_page2);
1378 } else {
1379 tb->page_addr[1] = -1;
1380 }
1381
1382 /* add in the hash table */
1383 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
1384 tb->trace_vcpu_dstate);
1385 qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1386
1387 /* remove TB from the page(s) if we couldn't insert it */
1388 if (unlikely(existing_tb)) {
1389 tb_page_remove(p, tb);
1390 invalidate_page_bitmap(p);
1391 if (p2) {
1392 tb_page_remove(p2, tb);
1393 invalidate_page_bitmap(p2);
1394 }
1395 tb = existing_tb;
1396 }
1397
1398 if (p2 && p2 != p) {
1399 page_unlock(p2);
1400 }
1401 page_unlock(p);
1402
1403 #ifdef CONFIG_USER_ONLY
1404 if (DEBUG_TB_CHECK_GATE) {
1405 tb_page_check();
1406 }
1407 #endif
1408 return tb;
1409 }
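/*
 * Editor's note -- usage sketch, not part of the original file.  Callers must
 * adopt the returned TB, as tb_gen_code() does below:
 *
 *     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
 *     if (existing_tb != tb) {
 *         // lost the race: give back the code buffer space and reuse
 *         // the TB that is already in the hash table
 *         tb_destroy(tb);
 *         return existing_tb;
 *     }
 *     tcg_tb_insert(tb);
 */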
1410
1411 /* Called with mmap_lock held for user mode emulation. */
1412 TranslationBlock *tb_gen_code(CPUState *cpu,
1413 target_ulong pc, target_ulong cs_base,
1414 uint32_t flags, int cflags)
1415 {
1416 CPUArchState *env = cpu->env_ptr;
1417 TranslationBlock *tb, *existing_tb;
1418 tb_page_addr_t phys_pc, phys_page2;
1419 target_ulong virt_page2;
1420 tcg_insn_unit *gen_code_buf;
1421 int gen_code_size, search_size, max_insns;
1422 #ifdef CONFIG_PROFILER
1423 TCGProfile *prof = &tcg_ctx->prof;
1424 int64_t ti;
1425 #endif
1426
1427 assert_memory_lock();
1428 qemu_thread_jit_write();
1429
1430 phys_pc = get_page_addr_code(env, pc);
1431
1432 if (phys_pc == -1) {
1433 /* Generate a one-shot TB with 1 insn in it */
1434 cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
1435 }
1436
1437 max_insns = cflags & CF_COUNT_MASK;
1438 if (max_insns == 0) {
1439 max_insns = CF_COUNT_MASK;
1440 }
1441 if (max_insns > TCG_MAX_INSNS) {
1442 max_insns = TCG_MAX_INSNS;
1443 }
1444 if (cpu->singlestep_enabled || singlestep) {
1445 max_insns = 1;
1446 }
1447
1448 buffer_overflow:
1449 tb = tcg_tb_alloc(tcg_ctx);
1450 if (unlikely(!tb)) {
1451 /* flush must be done */
1452 tb_flush(cpu);
1453 mmap_unlock();
1454 /* Make the execution loop process the flush as soon as possible. */
1455 cpu->exception_index = EXCP_INTERRUPT;
1456 cpu_loop_exit(cpu);
1457 }
1458
1459 gen_code_buf = tcg_ctx->code_gen_ptr;
1460 tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
1461 tb->pc = pc;
1462 tb->cs_base = cs_base;
1463 tb->flags = flags;
1464 tb->cflags = cflags;
1465 tb->trace_vcpu_dstate = *cpu->trace_dstate;
1466 tcg_ctx->tb_cflags = cflags;
1467 tb_overflow:
1468
1469 #ifdef CONFIG_PROFILER
1470 /* includes aborted translations because of exceptions */
1471 qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1472 ti = profile_getclock();
1473 #endif
1474
1475 gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0);
1476 if (unlikely(gen_code_size != 0)) {
1477 goto error_return;
1478 }
1479
1480 tcg_func_start(tcg_ctx);
1481
1482 tcg_ctx->cpu = env_cpu(env);
1483 gen_intermediate_code(cpu, tb, max_insns);
1484 assert(tb->size != 0);
1485 tcg_ctx->cpu = NULL;
1486 max_insns = tb->icount;
1487
1488 trace_translate_block(tb, tb->pc, tb->tc.ptr);
1489
1490 /* generate machine code */
1491 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1492 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1493 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1494 if (TCG_TARGET_HAS_direct_jump) {
1495 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1496 tcg_ctx->tb_jmp_target_addr = NULL;
1497 } else {
1498 tcg_ctx->tb_jmp_insn_offset = NULL;
1499 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1500 }
1501
1502 #ifdef CONFIG_PROFILER
1503 qatomic_set(&prof->tb_count, prof->tb_count + 1);
1504 qatomic_set(&prof->interm_time,
1505 prof->interm_time + profile_getclock() - ti);
1506 ti = profile_getclock();
1507 #endif
1508
1509 gen_code_size = tcg_gen_code(tcg_ctx, tb);
1510 if (unlikely(gen_code_size < 0)) {
1511 error_return:
1512 switch (gen_code_size) {
1513 case -1:
1514 /*
1515 * Overflow of code_gen_buffer, or the current slice of it.
1516 *
1517 * TODO: We don't need to re-do gen_intermediate_code, nor
1518 * should we re-do the tcg optimization currently hidden
1519 * inside tcg_gen_code. All that should be required is to
1520 * flush the TBs, allocate a new TB, re-initialize it per
1521 * above, and re-do the actual code generation.
1522 */
1523 qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1524 "Restarting code generation for "
1525 "code_gen_buffer overflow\n");
1526 goto buffer_overflow;
1527
1528 case -2:
1529 /*
1530 * The code generated for the TranslationBlock is too large.
1531 * The maximum size allowed by the unwind info is 64k.
1532 * There may be stricter constraints from relocations
1533 * in the tcg backend.
1534 *
1535 * Try again with half as many insns as we attempted this time.
1536 * If a single insn overflows, there's a bug somewhere...
1537 */
1538 assert(max_insns > 1);
1539 max_insns /= 2;
1540 qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
1541 "Restarting code generation with "
1542 "smaller translation block (max %d insns)\n",
1543 max_insns);
1544 goto tb_overflow;
1545
1546 default:
1547 g_assert_not_reached();
1548 }
1549 }
1550 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1551 if (unlikely(search_size < 0)) {
1552 goto buffer_overflow;
1553 }
1554 tb->tc.size = gen_code_size;
1555
1556 #ifdef CONFIG_PROFILER
1557 qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1558 qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1559 qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1560 qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1561 #endif
1562
1563 #ifdef DEBUG_DISAS
1564 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1565 qemu_log_in_addr_range(tb->pc)) {
1566 FILE *logfile = qemu_log_lock();
1567 int code_size, data_size;
1568 const tcg_target_ulong *rx_data_gen_ptr;
1569 size_t chunk_start;
1570 int insn = 0;
1571
1572 if (tcg_ctx->data_gen_ptr) {
1573 rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
1574 code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
1575 data_size = gen_code_size - code_size;
1576 } else {
1577 rx_data_gen_ptr = 0;
1578 code_size = gen_code_size;
1579 data_size = 0;
1580 }
1581
1582 /* Dump header and the first instruction */
1583 qemu_log("OUT: [size=%d]\n", gen_code_size);
1584 qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
1585 tcg_ctx->gen_insn_data[insn][0]);
1586 chunk_start = tcg_ctx->gen_insn_end_off[insn];
1587 log_disas(tb->tc.ptr, chunk_start);
1588
1589 /*
1590 * Dump each instruction chunk, wrapping up empty chunks into
1591 * the next instruction. The whole array is offset so the
1592 * first entry is the beginning of the 2nd instruction.
1593 */
1594 while (insn < tb->icount) {
1595 size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
1596 if (chunk_end > chunk_start) {
1597 qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n",
1598 tcg_ctx->gen_insn_data[insn][0]);
1599 log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
1600 chunk_start = chunk_end;
1601 }
1602 insn++;
1603 }
1604
1605 if (chunk_start < code_size) {
1606 qemu_log(" -- tb slow paths + alignment\n");
1607 log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
1608 }
1609
1610 /* Finally dump any data we may have after the block */
1611 if (data_size) {
1612 int i;
1613 qemu_log(" data: [size=%d]\n", data_size);
1614 for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
1615 if (sizeof(tcg_target_ulong) == 8) {
1616 qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
1617 (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
1618 } else if (sizeof(tcg_target_ulong) == 4) {
1619 qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
1620 (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
1621 } else {
1622 qemu_build_not_reached();
1623 }
1624 }
1625 }
1626 qemu_log("\n");
1627 qemu_log_flush();
1628 qemu_log_unlock(logfile);
1629 }
1630 #endif
1631
1632 qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
1633 ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1634 CODE_GEN_ALIGN));
1635
1636 /* init jump list */
1637 qemu_spin_init(&tb->jmp_lock);
1638 tb->jmp_list_head = (uintptr_t)NULL;
1639 tb->jmp_list_next[0] = (uintptr_t)NULL;
1640 tb->jmp_list_next[1] = (uintptr_t)NULL;
1641 tb->jmp_dest[0] = (uintptr_t)NULL;
1642 tb->jmp_dest[1] = (uintptr_t)NULL;
1643
1644 /* init original jump addresses which have been set during tcg_gen_code() */
1645 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1646 tb_reset_jump(tb, 0);
1647 }
1648 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1649 tb_reset_jump(tb, 1);
1650 }
1651
1652 /*
1653 * If the TB is not associated with a physical RAM page then
1654 * it must be a temporary one-insn TB, and we have nothing to do
1655 * except fill in the page_addr[] fields. Return early before
1656 * attempting to link to other TBs or add to the lookup table.
1657 */
1658 if (phys_pc == -1) {
1659 tb->page_addr[0] = tb->page_addr[1] = -1;
1660 return tb;
1661 }
1662
1663 /* check next page if needed */
1664 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1665 phys_page2 = -1;
1666 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1667 phys_page2 = get_page_addr_code(env, virt_page2);
1668 }
1669 /*
1670 * No explicit memory barrier is required -- tb_link_page() makes the
1671 * TB visible in a consistent state.
1672 */
1673 existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1674 /* if the TB already exists, discard what we just translated */
1675 if (unlikely(existing_tb != tb)) {
1676 uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1677
1678 orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1679 qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1680 tb_destroy(tb);
1681 return existing_tb;
1682 }
1683 tcg_tb_insert(tb);
1684 return tb;
1685 }
1686
1687 /*
1688 * @p must be non-NULL.
1689 * user-mode: call with mmap_lock held.
1690 * !user-mode: call with all @pages locked.
1691 */
1692 static void
1693 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1694 PageDesc *p, tb_page_addr_t start,
1695 tb_page_addr_t end,
1696 uintptr_t retaddr)
1697 {
1698 TranslationBlock *tb;
1699 tb_page_addr_t tb_start, tb_end;
1700 int n;
1701 #ifdef TARGET_HAS_PRECISE_SMC
1702 CPUState *cpu = current_cpu;
1703 CPUArchState *env = NULL;
1704 bool current_tb_not_found = retaddr != 0;
1705 bool current_tb_modified = false;
1706 TranslationBlock *current_tb = NULL;
1707 target_ulong current_pc = 0;
1708 target_ulong current_cs_base = 0;
1709 uint32_t current_flags = 0;
1710 #endif /* TARGET_HAS_PRECISE_SMC */
1711
1712 assert_page_locked(p);
1713
1714 #if defined(TARGET_HAS_PRECISE_SMC)
1715 if (cpu != NULL) {
1716 env = cpu->env_ptr;
1717 }
1718 #endif
1719
1720 /* we remove all the TBs in the range [start, end[ */
1721 /* XXX: see if in some cases it could be faster to invalidate all
1722 the code */
1723 PAGE_FOR_EACH_TB(p, tb, n) {
1724 assert_page_locked(p);
1725 /* NOTE: this is subtle as a TB may span two physical pages */
1726 if (n == 0) {
1727 /* NOTE: tb_end may be after the end of the page, but
1728 it is not a problem */
1729 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1730 tb_end = tb_start + tb->size;
1731 } else {
1732 tb_start = tb->page_addr[1];
1733 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1734 }
1735 if (!(tb_end <= start || tb_start >= end)) {
1736 #ifdef TARGET_HAS_PRECISE_SMC
1737 if (current_tb_not_found) {
1738 current_tb_not_found = false;
1739 /* now we have a real cpu fault */
1740 current_tb = tcg_tb_lookup(retaddr);
1741 }
1742 if (current_tb == tb &&
1743 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1744 /*
1745 * If we are modifying the current TB, we must stop
1746 * its execution. We could be more precise by checking
1747 * that the modification is after the current PC, but it
1748 * would require a specialized function to partially
1749 * restore the CPU state.
1750 */
1751 current_tb_modified = true;
1752 cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1753 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1754 &current_flags);
1755 }
1756 #endif /* TARGET_HAS_PRECISE_SMC */
1757 tb_phys_invalidate__locked(tb);
1758 }
1759 }
1760 #if !defined(CONFIG_USER_ONLY)
1761 /* if no code remaining, no need to continue to use slow writes */
1762 if (!p->first_tb) {
1763 invalidate_page_bitmap(p);
1764 tlb_unprotect_code(start);
1765 }
1766 #endif
1767 #ifdef TARGET_HAS_PRECISE_SMC
1768 if (current_tb_modified) {
1769 page_collection_unlock(pages);
1770 /* Force execution of one insn next time. */
1771 cpu->cflags_next_tb = 1 | curr_cflags(cpu);
1772 mmap_unlock();
1773 cpu_loop_exit_noexc(cpu);
1774 }
1775 #endif
1776 }
1777
1778 /*
1779 * Invalidate all TBs which intersect with the target physical address range
1780 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1784 *
1785 * Called with mmap_lock held for user-mode emulation
1786 */
1787 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1788 {
1789 struct page_collection *pages;
1790 PageDesc *p;
1791
1792 assert_memory_lock();
1793
1794 p = page_find(start >> TARGET_PAGE_BITS);
1795 if (p == NULL) {
1796 return;
1797 }
1798 pages = page_collection_lock(start, end);
1799 tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
1800 page_collection_unlock(pages);
1801 }
1802
1803 /*
1804 * Invalidate all TBs which intersect with the target physical address range
1805 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1809 *
1810 * Called with mmap_lock held for user-mode emulation.
1811 */
1812 #ifdef CONFIG_SOFTMMU
1813 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1814 #else
1815 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1816 #endif
1817 {
1818 struct page_collection *pages;
1819 tb_page_addr_t next;
1820
1821 assert_memory_lock();
1822
1823 pages = page_collection_lock(start, end);
1824 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1825 start < end;
1826 start = next, next += TARGET_PAGE_SIZE) {
1827 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1828 tb_page_addr_t bound = MIN(next, end);
1829
1830 if (pd == NULL) {
1831 continue;
1832 }
1833 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1834 }
1835 page_collection_unlock(pages);
1836 }
1837
1838 #ifdef CONFIG_SOFTMMU
1839 /* len must be <= 8 and start must be a multiple of len.
1840 * Called via softmmu_template.h when code areas are written to with
1841 * iothread mutex not held.
1842 *
1843 * Call with all @pages in the range [@start, @start + len[ locked.
1844 */
1845 void tb_invalidate_phys_page_fast(struct page_collection *pages,
1846 tb_page_addr_t start, int len,
1847 uintptr_t retaddr)
1848 {
1849 PageDesc *p;
1850
1851 assert_memory_lock();
1852
1853 p = page_find(start >> TARGET_PAGE_BITS);
1854 if (!p) {
1855 return;
1856 }
1857
1858 assert_page_locked(p);
1859 if (!p->code_bitmap &&
1860 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1861 build_page_bitmap(p);
1862 }
1863 if (p->code_bitmap) {
1864 unsigned int nr;
1865 unsigned long b;
1866
1867 nr = start & ~TARGET_PAGE_MASK;
1868 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1869 if (b & ((1 << len) - 1)) {
1870 goto do_invalidate;
1871 }
1872 } else {
1873 do_invalidate:
1874 tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
1875 retaddr);
1876 }
1877 }
1878 #else
1879 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1880 * host PC of the faulting store instruction that caused this invalidate.
1881 * Returns true if the caller needs to abort execution of the current
1882 * TB (because it was modified by this store and the guest CPU has
1883 * precise-SMC semantics).
1884 */
1885 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1886 {
1887 TranslationBlock *tb;
1888 PageDesc *p;
1889 int n;
1890 #ifdef TARGET_HAS_PRECISE_SMC
1891 TranslationBlock *current_tb = NULL;
1892 CPUState *cpu = current_cpu;
1893 CPUArchState *env = NULL;
1894 int current_tb_modified = 0;
1895 target_ulong current_pc = 0;
1896 target_ulong current_cs_base = 0;
1897 uint32_t current_flags = 0;
1898 #endif
1899
1900 assert_memory_lock();
1901
1902 addr &= TARGET_PAGE_MASK;
1903 p = page_find(addr >> TARGET_PAGE_BITS);
1904 if (!p) {
1905 return false;
1906 }
1907
1908 #ifdef TARGET_HAS_PRECISE_SMC
1909 if (p->first_tb && pc != 0) {
1910 current_tb = tcg_tb_lookup(pc);
1911 }
1912 if (cpu != NULL) {
1913 env = cpu->env_ptr;
1914 }
1915 #endif
1916 assert_page_locked(p);
1917 PAGE_FOR_EACH_TB(p, tb, n) {
1918 #ifdef TARGET_HAS_PRECISE_SMC
1919 if (current_tb == tb &&
1920 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1921 /* If we are modifying the current TB, we must stop
1922 its execution. We could be more precise by checking
1923 that the modification is after the current PC, but it
1924 would require a specialized function to partially
1925 restore the CPU state */
1926
1927 current_tb_modified = 1;
1928 cpu_restore_state_from_tb(cpu, current_tb, pc, true);
1929 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1930 &current_flags);
1931 }
1932 #endif /* TARGET_HAS_PRECISE_SMC */
1933 tb_phys_invalidate(tb, addr);
1934 }
1935 p->first_tb = (uintptr_t)NULL;
1936 #ifdef TARGET_HAS_PRECISE_SMC
1937 if (current_tb_modified) {
1938 /* Force execution of one insn next time. */
1939 cpu->cflags_next_tb = 1 | curr_cflags(cpu);
1940 return true;
1941 }
1942 #endif
1943
1944 return false;
1945 }
1946 #endif
1947
1948 /* user-mode: call with mmap_lock held */
1949 void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
1950 {
1951 TranslationBlock *tb;
1952
1953 assert_memory_lock();
1954
1955 tb = tcg_tb_lookup(retaddr);
1956 if (tb) {
1957 /* We can use retranslation to find the PC. */
1958 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
1959 tb_phys_invalidate(tb, -1);
1960 } else {
1961 /* The exception probably happened in a helper. The CPU state should
1962 have been saved before calling it. Fetch the PC from there. */
1963 CPUArchState *env = cpu->env_ptr;
1964 target_ulong pc, cs_base;
1965 tb_page_addr_t addr;
1966 uint32_t flags;
1967
1968 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1969 addr = get_page_addr_code(env, pc);
1970 if (addr != -1) {
1971 tb_invalidate_phys_range(addr, addr + 1);
1972 }
1973 }
1974 }
1975
1976 #ifndef CONFIG_USER_ONLY
1977 /*
1978 * In deterministic execution mode, instructions doing device I/Os
1979 * must be at the end of the TB.
1980 *
1981 * Called by softmmu_template.h, with iothread mutex not held.
1982 */
1983 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1984 {
1985 TranslationBlock *tb;
1986 CPUClass *cc;
1987 uint32_t n;
1988
1989 tb = tcg_tb_lookup(retaddr);
1990 if (!tb) {
1991 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1992 (void *)retaddr);
1993 }
1994 cpu_restore_state_from_tb(cpu, tb, retaddr, true);
1995
1996 /*
1997 * Some guests must re-execute the branch when re-executing a delay
1998 * slot instruction. When this is the case, adjust icount and N
1999 * to account for the re-execution of the branch.
2000 */
2001 n = 1;
2002 cc = CPU_GET_CLASS(cpu);
2003 if (cc->tcg_ops->io_recompile_replay_branch &&
2004 cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
2005 cpu_neg(cpu)->icount_decr.u16.low++;
2006 n = 2;
2007 }
2008
2009 /*
2010 * Exit the loop and potentially generate a new TB executing
2011 * just the I/O insns. We also limit instrumentation to memory
2012 * operations only (which execute after completion) so we don't
2013 * double instrument the instruction.
2014 */
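/* n occupies the CF_COUNT_MASK bits of cflags, limiting the next TB to at most n insns. */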
2015 cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
2016
2017 qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
2018 "cpu_io_recompile: rewound execution of TB to "
2019 TARGET_FMT_lx "\n", tb->pc);
2020
2021 cpu_loop_exit_noexc(cpu);
2022 }
2023
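/*
 * Print utilization statistics for the TB hash table: head-bucket usage
 * plus occupancy and chain-length histograms. Prints nothing when the
 * table is empty.
 */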
2024 static void print_qht_statistics(struct qht_stats hst)
2025 {
2026 uint32_t hgram_opts;
2027 size_t hgram_bins;
2028 char *hgram;
2029
2030 if (!hst.head_buckets) {
2031 return;
2032 }
2033 qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
2034 hst.used_head_buckets, hst.head_buckets,
2035 (double)hst.used_head_buckets / hst.head_buckets * 100);
2036
2037 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2038 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
2039 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2040 hgram_opts |= QDIST_PR_NODECIMAL;
2041 }
2042 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2043 qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
2044 qdist_avg(&hst.occupancy) * 100, hgram);
2045 g_free(hgram);
2046
2047 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2048 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2049 if (hgram_bins > 10) {
2050 hgram_bins = 10;
2051 } else {
2052 hgram_bins = 0;
2053 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2054 }
2055 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2056 qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
2057 qdist_avg(&hst.chain), hgram);
2058 g_free(hgram);
2059 }
2060
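/*
 * Per-TB statistics accumulated by tb_tree_stats_iter() while
 * dump_exec_info() walks all translation blocks with tcg_tb_foreach().
 */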
2061 struct tb_tree_stats {
2062 size_t nb_tbs;
2063 size_t host_size;
2064 size_t target_size;
2065 size_t max_target_size;
2066 size_t direct_jmp_count;
2067 size_t direct_jmp2_count;
2068 size_t cross_page;
2069 };
2070
2071 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2072 {
2073 const TranslationBlock *tb = value;
2074 struct tb_tree_stats *tst = data;
2075
2076 tst->nb_tbs++;
2077 tst->host_size += tb->tc.size;
2078 tst->target_size += tb->size;
2079 if (tb->size > tst->max_target_size) {
2080 tst->max_target_size = tb->size;
2081 }
2082 if (tb->page_addr[1] != -1) {
2083 tst->cross_page++;
2084 }
2085 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2086 tst->direct_jmp_count++;
2087 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2088 tst->direct_jmp2_count++;
2089 }
2090 }
2091 return false;
2092 }
2093
2094 void dump_exec_info(void)
2095 {
2096 struct tb_tree_stats tst = {};
2097 struct qht_stats hst;
2098 size_t nb_tbs, flush_full, flush_part, flush_elide;
2099
2100 tcg_tb_foreach(tb_tree_stats_iter, &tst);
2101 nb_tbs = tst.nb_tbs;
2102 /* XXX: avoid using doubles ? */
2103 qemu_printf("Translation buffer state:\n");
2104 /*
2105 * Report total code size including the padding and TB structs;
2106 * otherwise users might think "-accel tcg,tb-size" is not honoured.
2107 * For avg host size we use the precise numbers from tb_tree_stats though.
2108 */
2109 qemu_printf("gen code size %zu/%zu\n",
2110 tcg_code_size(), tcg_code_capacity());
2111 qemu_printf("TB count %zu\n", nb_tbs);
2112 qemu_printf("TB avg target size %zu max=%zu bytes\n",
2113 nb_tbs ? tst.target_size / nb_tbs : 0,
2114 tst.max_target_size);
2115 qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
2116 nb_tbs ? tst.host_size / nb_tbs : 0,
2117 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2118 qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
2119 nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2120 qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2121 tst.direct_jmp_count,
2122 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2123 tst.direct_jmp2_count,
2124 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2125
2126 qht_statistics_init(&tb_ctx.htable, &hst);
2127 print_qht_statistics(hst);
2128 qht_statistics_destroy(&hst);
2129
2130 qemu_printf("\nStatistics:\n");
2131 qemu_printf("TB flush count %u\n",
2132 qatomic_read(&tb_ctx.tb_flush_count));
2133 qemu_printf("TB invalidate count %zu\n",
2134 tcg_tb_phys_invalidate_count());
2135
2136 tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
2137 qemu_printf("TLB full flushes %zu\n", flush_full);
2138 qemu_printf("TLB partial flushes %zu\n", flush_part);
2139 qemu_printf("TLB elided flushes %zu\n", flush_elide);
2140 tcg_dump_info();
2141 }
2142
2143 void dump_opcount_info(void)
2144 {
2145 tcg_dump_op_count();
2146 }
2147
2148 #else /* CONFIG_USER_ONLY */
2149
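/*
 * User-mode version: record the pending interrupt and set the high half
 * of icount_decr so the CPU drops out of the translated-code loop.
 */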
2150 void cpu_interrupt(CPUState *cpu, int mask)
2151 {
2152 g_assert(qemu_mutex_iothread_locked());
2153 cpu->interrupt_request |= mask;
2154 qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
2155 }
2156
2157 /*
2158 * Walks guest process memory "regions" one by one
2159 * and calls callback function 'fn' for each region.
2160 */
2161 struct walk_memory_regions_data {
2162 walk_memory_regions_fn fn;
2163 void *priv;
2164 target_ulong start;
2165 int prot;
2166 };
2167
2168 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2169 target_ulong end, int new_prot)
2170 {
2171 if (data->start != -1u) {
2172 int rc = data->fn(data->priv, data->start, end, data->prot);
2173 if (rc != 0) {
2174 return rc;
2175 }
2176 }
2177
2178 data->start = (new_prot ? end : -1u);
2179 data->prot = new_prot;
2180
2181 return 0;
2182 }
2183
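/*
 * Recursive helper for walk_memory_regions(): descend one level of the
 * l1_map radix tree. At level 0 the entries are PageDesc, and a region
 * boundary is reported via walk_memory_regions_end() whenever the
 * protection flags change.
 */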
2184 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2185 target_ulong base, int level, void **lp)
2186 {
2187 target_ulong pa;
2188 int i, rc;
2189
2190 if (*lp == NULL) {
2191 return walk_memory_regions_end(data, base, 0);
2192 }
2193
2194 if (level == 0) {
2195 PageDesc *pd = *lp;
2196
2197 for (i = 0; i < V_L2_SIZE; ++i) {
2198 int prot = pd[i].flags;
2199
2200 pa = base | (i << TARGET_PAGE_BITS);
2201 if (prot != data->prot) {
2202 rc = walk_memory_regions_end(data, pa, prot);
2203 if (rc != 0) {
2204 return rc;
2205 }
2206 }
2207 }
2208 } else {
2209 void **pp = *lp;
2210
2211 for (i = 0; i < V_L2_SIZE; ++i) {
2212 pa = base | ((target_ulong)i <<
2213 (TARGET_PAGE_BITS + V_L2_BITS * level));
2214 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2215 if (rc != 0) {
2216 return rc;
2217 }
2218 }
2219 }
2220
2221 return 0;
2222 }
2223
2224 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2225 {
2226 struct walk_memory_regions_data data;
2227 uintptr_t i, l1_sz = v_l1_size;
2228
2229 data.fn = fn;
2230 data.priv = priv;
2231 data.start = -1u;
2232 data.prot = 0;
2233
2234 for (i = 0; i < l1_sz; i++) {
2235 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2236 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2237 if (rc != 0) {
2238 return rc;
2239 }
2240 }
2241
2242 return walk_memory_regions_end(&data, 0, 0);
2243 }
2244
2245 static int dump_region(void *priv, target_ulong start,
2246 target_ulong end, unsigned long prot)
2247 {
2248 FILE *f = (FILE *)priv;
2249
2250 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2251 " "TARGET_FMT_lx" %c%c%c\n",
2252 start, end, end - start,
2253 ((prot & PAGE_READ) ? 'r' : '-'),
2254 ((prot & PAGE_WRITE) ? 'w' : '-'),
2255 ((prot & PAGE_EXEC) ? 'x' : '-'));
2256
2257 return 0;
2258 }
2259
2260 /* dump memory mappings */
2261 void page_dump(FILE *f)
2262 {
2263 const int length = sizeof(target_ulong) * 2;
2264 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2265 length, "start", length, "end", length, "size", "prot");
2266 walk_memory_regions(f, dump_region);
2267 }
2268
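/* Return the PAGE_* flags of the page containing @address, or 0 if no
   PageDesc exists for it. */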
2269 int page_get_flags(target_ulong address)
2270 {
2271 PageDesc *p;
2272
2273 p = page_find(address >> TARGET_PAGE_BITS);
2274 if (!p) {
2275 return 0;
2276 }
2277 return p->flags;
2278 }
2279
2280 /* Modify the flags of a page and invalidate the code if necessary.
2281 The flag PAGE_WRITE_ORG is set automatically based
2282 on PAGE_WRITE. The mmap_lock should already be held. */
2283 void page_set_flags(target_ulong start, target_ulong end, int flags)
2284 {
2285 target_ulong addr, len;
2286 bool reset_target_data;
2287
2288 /* This function should never be called with addresses outside the
2289 guest address space. If this assert fires, it probably indicates
2290 a missing call to h2g_valid. */
2291 assert(end - 1 <= GUEST_ADDR_MAX);
2292 assert(start < end);
2293 /* Only set PAGE_ANON with new mappings. */
2294 assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
2295 assert_memory_lock();
2296
2297 start = start & TARGET_PAGE_MASK;
2298 end = TARGET_PAGE_ALIGN(end);
2299
2300 if (flags & PAGE_WRITE) {
2301 flags |= PAGE_WRITE_ORG;
2302 }
2303 reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
2304 flags &= ~PAGE_RESET;
2305
2306 for (addr = start, len = end - start;
2307 len != 0;
2308 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2309 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2310
2311 /* If the page is gaining write permission and contains
2312 translated code, invalidate that code. */
2313 if (!(p->flags & PAGE_WRITE) &&
2314 (flags & PAGE_WRITE) &&
2315 p->first_tb) {
2316 tb_invalidate_phys_page(addr, 0);
2317 }
2318 if (reset_target_data) {
2319 g_free(p->target_data);
2320 p->target_data = NULL;
2321 p->flags = flags;
2322 } else {
2323 /* Using mprotect on a page does not change MAP_ANON. */
2324 p->flags = (p->flags & PAGE_ANON) | flags;
2325 }
2326 }
2327 }
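
/*
 * Illustrative sketch (not the exact call sites): a new anonymous mapping
 * in the user-mode mmap path would typically be registered as
 *
 *     page_set_flags(start, start + len,
 *                    prot | PAGE_VALID | PAGE_RESET | PAGE_ANON);
 *
 * whereas an mprotect() of an existing mapping passes only PAGE_VALID plus
 * the new protection bits, so PAGE_ANON and any target_data survive.
 */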
2328
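/* Return the per-page target-specific data for @address, or NULL if the
   page is unmapped or no data has been allocated for it. */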
2329 void *page_get_target_data(target_ulong address)
2330 {
2331 PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2332 return p ? p->target_data : NULL;
2333 }
2334
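/* Return the target-specific data for the page containing @address,
   allocating @size zeroed bytes on first use. Returns NULL if the page
   is not PAGE_VALID. */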
2335 void *page_alloc_target_data(target_ulong address, size_t size)
2336 {
2337 PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
2338 void *ret = NULL;
2339
2340 if (p->flags & PAGE_VALID) {
2341 ret = p->target_data;
2342 if (!ret) {
2343 p->target_data = ret = g_malloc0(size);
2344 }
2345 }
2346 return ret;
2347 }
2348
2349 int page_check_range(target_ulong start, target_ulong len, int flags)
2350 {
2351 PageDesc *p;
2352 target_ulong end;
2353 target_ulong addr;
2354
2355 /* This function should never be called with addresses outside the
2356 guest address space. If this assert fires, it probably indicates
2357 a missing call to h2g_valid. */
2358 if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
2359 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2360 }
2361
2362 if (len == 0) {
2363 return 0;
2364 }
2365 if (start + len - 1 < start) {
2366 /* We've wrapped around. */
2367 return -1;
2368 }
2369
2370 /* Must do this before we lose bits in the next step. */
2371 end = TARGET_PAGE_ALIGN(start + len);
2372 start = start & TARGET_PAGE_MASK;
2373
2374 for (addr = start, len = end - start;
2375 len != 0;
2376 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2377 p = page_find(addr >> TARGET_PAGE_BITS);
2378 if (!p) {
2379 return -1;
2380 }
2381 if (!(p->flags & PAGE_VALID)) {
2382 return -1;
2383 }
2384
2385 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2386 return -1;
2387 }
2388 if (flags & PAGE_WRITE) {
2389 if (!(p->flags & PAGE_WRITE_ORG)) {
2390 return -1;
2391 }
2392 /* unprotect the page if it was put read-only because it
2393 contains translated code */
2394 if (!(p->flags & PAGE_WRITE)) {
2395 if (!page_unprotect(addr, 0)) {
2396 return -1;
2397 }
2398 }
2399 }
2400 }
2401 return 0;
2402 }
2403
2404 /* called from signal handler: invalidate the code and unprotect the
2405 * page. Return 0 if the fault was not handled, 1 if it was handled,
2406 * and 2 if it was handled but the caller must cause the TB to be
2407 * immediately exited. (We can only return 2 if the 'pc' argument is
2408 * non-zero.)
2409 */
2410 int page_unprotect(target_ulong address, uintptr_t pc)
2411 {
2412 unsigned int prot;
2413 bool current_tb_invalidated;
2414 PageDesc *p;
2415 target_ulong host_start, host_end, addr;
2416
2417 /* Technically this isn't safe inside a signal handler. However, we
2418 know this only ever happens in a synchronous SEGV handler, so in
2419 practice it seems to be OK. */
2420 mmap_lock();
2421
2422 p = page_find(address >> TARGET_PAGE_BITS);
2423 if (!p) {
2424 mmap_unlock();
2425 return 0;
2426 }
2427
2428 /* If the page was originally writable (PAGE_WRITE_ORG), restore its
2429 write permission. */
2430 if (p->flags & PAGE_WRITE_ORG) {
2431 current_tb_invalidated = false;
2432 if (p->flags & PAGE_WRITE) {
2433 /* If the page is actually marked WRITE then assume this is because
2434 * this thread raced with another one which got here first and
2435 * set the page to PAGE_WRITE and did the TB invalidate for us.
2436 */
2437 #ifdef TARGET_HAS_PRECISE_SMC
2438 TranslationBlock *current_tb = tcg_tb_lookup(pc);
2439 if (current_tb) {
2440 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2441 }
2442 #endif
2443 } else {
2444 host_start = address & qemu_host_page_mask;
2445 host_end = host_start + qemu_host_page_size;
2446
2447 prot = 0;
2448 for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2449 p = page_find(addr >> TARGET_PAGE_BITS);
2450 p->flags |= PAGE_WRITE;
2451 prot |= p->flags;
2452
2453 /* and since the content will be modified, we must invalidate
2454 the corresponding translated code. */
2455 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2456 #ifdef CONFIG_USER_ONLY
2457 if (DEBUG_TB_CHECK_GATE) {
2458 tb_invalidate_check(addr);
2459 }
2460 #endif
2461 }
2462 mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
2463 prot & PAGE_BITS);
2464 }
2465 mmap_unlock();
2466 /* If current TB was invalidated return to main loop */
2467 return current_tb_invalidated ? 2 : 1;
2468 }
2469 mmap_unlock();
2470 return 0;
2471 }
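
/*
 * Illustrative sketch (not the actual handler code) of how the user-mode
 * SEGV path is expected to consume page_unprotect()'s return value;
 * queue_guest_sigsegv() is a hypothetical placeholder:
 *
 *     switch (page_unprotect(guest_addr, host_pc)) {
 *     case 0:   // not our write protection: deliver the fault to the guest
 *         queue_guest_sigsegv();
 *         break;
 *     case 1:   // handled: simply retry the faulting access
 *         return;
 *     case 2:   // handled, but the current TB was invalidated
 *         cpu_loop_exit_noexc(cpu);
 *     }
 */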
2472 #endif /* CONFIG_USER_ONLY */
2473
2474 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2475 void tcg_flush_softmmu_tlb(CPUState *cs)
2476 {
2477 #ifdef CONFIG_SOFTMMU
2478 tlb_flush(cs);
2479 #endif
2480 }