/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU the state is to be restored to
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
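
/*
 * Illustrative sketch (not a real QEMU helper): a target helper that
 * detects a fault mid-TB would typically restore guest state from the
 * host return address before leaving the CPU loop. helper_check_op(),
 * op_would_fault() and the env-to-CPUState conversion are hypothetical.
 *
 *     void helper_check_op(CPUArchState *env, target_ulong arg)
 *     {
 *         CPUState *cs = env_to_cpu(env);         // assumed accessor
 *         if (op_would_fault(env, arg)) {         // hypothetical predicate
 *             cpu_restore_state(cs, GETPC(), true);
 *             cpu_loop_exit(cs);
 *         }
 *     }
 */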

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
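
/*
 * Minimal sketch (hypothetical target realize code; "secure_mr" and
 * "ns_mr" stand in for the root MemoryRegions of two bus views):
 *
 *     cpu->num_ases = 2;    // must be set before the first init call
 *     cpu_address_space_init(cpu, 0, "cpu-memory", ns_mr);
 *     cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
 */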
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
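/*
 * For example (illustrative only): after changing the mapping or
 * permissions of one guest page, target code would drop any stale
 * entry for it with:
 *
 *     tlb_flush_page(cpu, vaddr & TARGET_PAGE_MASK);
 */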
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
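/*
 * @idxmap carries one bit per MMU index. An illustrative sketch
 * (MMU_USER_IDX and MMU_KERNEL_IDX are hypothetical target-defined
 * indexes):
 *
 *     uint16_t idxmap = (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX);
 *     tlb_flush_page_by_mmuidx(cpu, vaddr, idxmap);
 */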
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
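/*
 * Sketch of the usual flow described above (the page-table walker and
 * its output variables are hypothetical): a target's tlb_fill() walks
 * the page tables and, on success, installs the translation.
 *
 *     if (page_table_walk(env, vaddr, access_type, mmu_idx,
 *                         &paddr, &prot, &attrs, &page_size)) {
 *         tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, page_size);
 *     }
 */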
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

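/*
 * A sketch of that note (illustrative only): the out-of-line search
 * data for a TB can be located as
 *
 *     const void *search_data = (const char *)tb->tc.ptr + tb->tc.size;
 */
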
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
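
/*
 * Illustrative sketch of the tagged-pointer convention documented above
 * (variable names are ours, not QEMU internals): walking one step of an
 * incoming-jump list strips the tag to get the TB and uses the tag to
 * pick the list slot that continues the chain.
 *
 *     uintptr_t ent = tb->jmp_list_head;
 *     TranslationBlock *origin = (TranslationBlock *)(ent & ~(uintptr_t)1);
 *     int slot = ent & 1;
 *     uintptr_t next = origin->jmp_list_next[slot];  // NULL-terminated
 */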

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
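/*
 * A lookup is keyed on pc/cs_base/flags plus the cflags bits selected
 * by CF_HASH_MASK; a minimal sketch using the current context:
 *
 *     uint32_t cf_mask = curr_cflags() & CF_HASH_MASK;
 *     TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
 *     if (tb == NULL) {
 *         // miss: callers typically generate a new TB via tb_gen_code()
 *     }
 */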

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
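
/*
 * Taken together (illustrative sketch; which layer applies the
 * adjustment varies): a return address captured with GETPC() is moved
 * back by GETPC_ADJ before searching for the TB that contains it.
 *
 *     uintptr_t retaddr = GETPC();
 *     uintptr_t search_pc = retaddr - GETPC_ADJ;  // inside the call insn
 */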

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes for the access
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
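/*
 * Minimal usage sketch (assumes "iotlb" was previously produced by
 * memory_region_section_get_iotlb() and "attrs" describes the access):
 *
 *     MemoryRegionSection *section = iotlb_to_section(cpu, iotlb, attrs);
 *     MemoryRegion *mr = section->mr;
 */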
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
#endif

/* vl.c */
extern int singlestep;

#endif