/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow seeing translation results - the slowdown should be negligible,
   so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

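/*
 * Example (illustrative sketch, not part of the API): a runtime helper
 * that detects a fault can unwind with the guest state resynchronized
 * from the host return address; GETPC(), defined further down, is the
 * usual way to obtain that address.  "example_unwind" is a hypothetical
 * name, not a QEMU function.
 */
static inline void example_unwind(CPUState *cpu, uintptr_t host_pc)
{
    /* Restores guest state from host_pc, then longjmps out of the TB. */
    cpu_loop_exit_restore(cpu, host_pc);
}
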
#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
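
/*
 * Example (illustrative sketch): a target CPU's realize function might
 * register two address spaces along these lines.  The function name,
 * the AS names and the @secure_mr region are hypothetical.
 */
static inline void example_init_ases(CPUState *cpu, MemoryRegion *mr,
                                     MemoryRegion *secure_mr)
{
    cpu->num_ases = 2;  /* must be set before the first call below */
    /* the AS registered with asidx 0 also becomes cpu->as */
    cpu_address_space_init(cpu, 0, "cpu-memory", mr);
    cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
}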
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the guest's current translation block ends.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU.  Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe.  If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete.  This will depend on when
 * the guest's current translation block ends.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
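
/*
 * Example (illustrative sketch): because @idxmap is a bitmap, several
 * MMU indexes can be flushed in one call.  The index numbers here are
 * hypothetical; real targets use their own MMU index definitions.
 */
static inline void example_flush_two_mmu_idx(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, (1 << 0) | (1 << 1));
}
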
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus except that the
 * originating vCPU's work is scheduled as safe work, meaning all
 * flushes will be complete once that safe work is complete.  This
 * will depend on when the guest's current translation block ends.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus except that the
 * originating vCPU's work is scheduled as safe work, meaning all
 * flushes will be complete once that safe work is complete.  This
 * will depend on when the guest's current translation block ends.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED.  It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
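
/*
 * Example (illustrative sketch): the tail of a target's tlb_fill() after
 * a successful page table walk.  @paddr, @prot and @attrs are assumed to
 * come from the target-specific walk (not shown), and the TARGET_PAGE_*
 * macros from the target headers.  The function name is hypothetical.
 */
static inline void example_finish_tlb_fill(CPUState *cpu, target_ulong vaddr,
                                           hwaddr paddr, MemTxAttrs attrs,
                                           int prot, int mmu_idx)
{
    /* Only one TARGET_PAGE_SIZE region is mapped per entry. */
    tlb_set_page_with_attrs(cpu, vaddr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK, attrs,
                            prot, mmu_idx, TARGET_PAGE_SIZE);
}
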
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

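/*
 * Example (illustrative sketch): an ordering usable by a binary search
 * tree over struct tb_tc.  Only @ptr is needed to order distinct TBs;
 * @size bounds a TB's half-open code range [ptr, ptr + size).
 */
static inline int example_tb_tc_cmp(const struct tb_tc *a,
                                    const struct tb_tc *b)
{
    uintptr_t pa = (uintptr_t)a->ptr;
    uintptr_t pb = (uintptr_t)b->ptr;

    return pa < pb ? -1 : (pa > pb ? 1 : 0);
}
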
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

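/*
 * Example (illustrative sketch): decoding one entry of the tagged
 * pointer lists described above.  The LSB selects which of the two
 * jmp_list_next[] slots in the pointed-to TB continues the chain.
 * The function name is hypothetical.
 */
static inline TranslationBlock *example_jmp_decode(uintptr_t ent, int *n)
{
    *n = ent & 1;                                      /* outgoing jump slot */
    return (TranslationBlock *)(ent & ~(uintptr_t)1);  /* the TB itself */
}
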
extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

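/*
 * Example (illustrative sketch): code requesting a translation for the
 * current context passes curr_cflags() so that CF_PARALLEL/CF_USE_ICOUNT
 * match the mode the TB will run in.  "example_translate" is a
 * hypothetical wrapper, not a QEMU function.
 */
static inline TranslationBlock *example_translate(CPUState *cpu,
                                                  target_ulong pc,
                                                  target_ulong cs_base,
                                                  uint32_t flags)
{
    return tb_gen_code(cpu, pc, cs_base, flags, curr_cflags());
}
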
/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

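/*
 * Example (illustrative sketch): a helper called directly from generated
 * code captures GETPC() once on entry; because the macro reads the host
 * return address, it must not be deferred to a deeper callee.  The
 * helper name is hypothetical.
 */
static inline void example_helper_fault(CPUState *cpu)
{
    uintptr_t ra = GETPC();         /* host return address into the TB */
    cpu_loop_exit_restore(cpu, ra); /* resync guest state and unwind */
}
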
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif