/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _EXEC_ALL_H_
#define _EXEC_ALL_H_

#include "qemu-common.h"

/* Allow the translation results to be seen - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an
   appropriate type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
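
/* A target's translator loop typically keeps decoding guest instructions
 * while is_jmp stays DISAS_NEXT and stops otherwise - a minimal sketch,
 * assuming a hypothetical DisasContext with an is_jmp field:
 *
 *     do {
 *         disas_insn(dc);            // hypothetical per-insn decoder
 *     } while (dc->is_jmp == DISAS_NEXT && !end_of_page(dc));
 */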

struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
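
/* Worked through: MAX_OPC_PARAM_ARGS is 5 + 1 = 6, so MAX_OPC_PARAM comes
 * to 4 + 2 * 6 = 16 on 32-bit hosts and 4 + 1 * 6 = 10 on 64-bit hosts. */
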
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 * @flush_global: ignored
 *
 * Flush the entire TLB for the specified CPU.
 * The flush_global flag is in theory an indicator of whether the whole
 * TLB should be flushed, or only those entries not marked global.
 * In practice QEMU does not implement any global/not global flag for
 * TLB entries, and the argument is ignored.
 */
void tlb_flush(CPUState *cpu, int flush_global);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
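
/* Usage sketch: both variants take a negative-terminated list of MMU
 * indexes; e.g. to flush indexes 0 and 2 only (index values illustrative):
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 2, -1);
 *     tlb_flush_by_mmuidx(cpu, 0, 2, -1);
 */
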
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
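
/* TBs are looked up by hashing the physical PC into tb_phys_hash[]; one
 * plausible hash (illustrative only - the real implementation lives
 * elsewhere in the tree) masks the PC down to the table size:
 *
 *     static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
 *     {
 *         return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
 *     }
 */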

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
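
/* The low bits of cflags carry the icount instruction budget; readers
 * extract the count and test the flag bits separately, e.g.:
 *
 *     int icount = tb->cflags & CF_COUNT_MASK;
 *     bool nocache = (tb->cflags & CF_NOCACHE) != 0;
 */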

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
};
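
/* A minimal sketch of walking that circular list: the tag in the low two
 * bits selects which jmp_next[] slot to follow, and tag 2 terminates back
 * at the owning TB's jmp_first (variable names are illustrative):
 *
 *     uintptr_t entry = (uintptr_t)tb->jmp_first;
 *     while ((entry & 3) != 2) {
 *         TranslationBlock *next = (TranslationBlock *)(entry & ~3);
 *         entry = (uintptr_t)next->jmp_next[entry & 3];
 *     }
 */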

#include "qemu/thread.h"

typedef struct TBContext TBContext;

struct TBContext {

    TranslationBlock *tbs;
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    QemuMutex tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    int tb_invalidated_flag;
};
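
/* Code that walks or mutates tbs/tb_phys_hash is expected to hold tb_lock;
 * a minimal sketch, assuming a TBContext instance named tb_ctx:
 *
 *     qemu_mutex_lock(&tb_ctx.tb_lock);
 *     ... look up or invalidate TBs ...
 *     qemu_mutex_unlock(&tb_ctx.tb_lock);
 */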

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}

/* GETRA is the true target of the return instruction that we'll execute,
   defined here for simplicity of defining the follow-up macros.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#define GETPC() (GETRA() - GETPC_ADJ)

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index);

void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
extern CPUState *tcg_current_cpu;
extern bool exit_request;

#endif