/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/rcu.h"

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */
/* some important defines:
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host CPU is big endian;
 * otherwise it is little endian.
 *
 * TARGET_WORDS_BIGENDIAN : same for the target CPU
 */

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
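
/*
 * Example (illustrative only; the helper name is hypothetical): a value
 * held in guest memory is stored in target byte order, so host code
 * converts on access.  tswap32() compiles to nothing when host and
 * target endianness agree, and to a bswap32() otherwise.
 */
static inline uint32_t example_read_target_u32(const uint32_t *guest_word)
{
    return tswap32(*guest_word);    /* target byte order -> host byte order */
}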

/* Target-endianness CPU memory access functions.  These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
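
/*
 * Example (illustrative only; the helper name is hypothetical): stl_p()
 * and ldl_p() accept any host pointer and serialize in target byte
 * order, so a round trip through a byte buffer returns the original
 * value on every host/target combination.
 */
static inline uint32_t example_roundtrip_u32(uint32_t val)
{
    uint8_t buf[4];

    stl_p(buf, val);    /* store 32 bits in target endianness */
    return ldl_p(buf);  /* load them back */
}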

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
extern uintptr_t guest_base;
extern bool have_guest_base;
extern unsigned long reserved_va;

/*
 * Limit the guest addresses as best we can.
 *
 * When not using -R reserved_va, we cannot really limit the guest
 * to less address space than the host.  For 32-bit guests, this
 * acts as a sanity check that we're not giving the guest an address
 * that it cannot even represent.  For 64-bit guests... the address
 * might not be what the real kernel would give, but it is at least
 * representable in the guest.
 *
 * TODO: Improve address allocation to avoid this problem, and to
 * avoid setting bits at the top of guest addresses that might need
 * to be used for tags.
 */
#define GUEST_ADDR_MAX_ \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
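
/*
 * Example (illustrative only; the helper name is hypothetical): callers
 * that hand addresses to the guest are expected to stay within
 * GUEST_ADDR_MAX, e.g. with a range check of this shape.
 */
static inline bool example_guest_range_ok(abi_ulong addr, abi_ulong len)
{
    /* Reject wrap-around as well as ranges beyond the usable space.  */
    return len <= GUEST_ADDR_MAX && addr <= GUEST_ADDR_MAX - len;
}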

#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
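
/*
 * Example (illustrative only; the helper name is hypothetical): the
 * includes above expand to accessors such as ldl_phys(as, addr) and
 * stl_phys(as, addr, val), which read and write guest physical memory
 * in target endianness through the given AddressSpace.
 */
static inline uint32_t example_swap_phys_u32(AddressSpace *as, hwaddr addr,
                                             uint32_t new_val)
{
    uint32_t old_val = ldl_phys(as, addr);  /* read guest physical memory */

    stl_phys(as, addr, new_val);            /* overwrite it */
    return old_val;
}
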
#endif

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
#if defined(CONFIG_ATTRIBUTE_ALIAS) || !defined(IN_EXEC_VARY)
extern const TargetPageBits target_page;
#else
extern TargetPageBits target_page;
#endif
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS   ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                              (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS   target_page.bits
#define TARGET_PAGE_MASK   ((target_long)target_page.mask)
#endif
/* The mask has the low TARGET_PAGE_BITS clear, so its two's-complement
   negation is exactly 1 << TARGET_PAGE_BITS.  */
#define TARGET_PAGE_SIZE   (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE   (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK   ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
 * when intptr_t is 32-bit and we are aligning a long long.
 */
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
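
/*
 * Example (illustrative only; the helper name is hypothetical): rounding
 * a guest range outward to page boundaries, as mapping and protection
 * code does before operating on whole pages.
 */
static inline void example_page_bounds(target_ulong addr, target_ulong len,
                                       target_ulong *start, target_ulong *end)
{
    *start = addr & TARGET_PAGE_MASK;        /* round down to page start */
    *end = TARGET_PAGE_ALIGN(addr + len);    /* round up to next boundary */
}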

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/*
 * Original state of the write flag (used when tracking self-modifying code)
 */
#define PAGE_WRITE_ORG 0x0010
/*
 * Invalidate the TLB entry immediately, helpful for s390x
 * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
 */
#define PAGE_WRITE_INV 0x0020
/* For use with page_set_flags: page is being replaced; target_data cleared. */
#define PAGE_RESET     0x0040
/* For linux-user, indicates that the page is MAP_ANON. */
#define PAGE_ANON      0x0080

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0100
#endif
/* Target-specific bits that will be used via page_get_flags().  */
#define PAGE_TARGET_1  0x0200
#define PAGE_TARGET_2  0x0400

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
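
/*
 * Example (illustrative only; the helper name is hypothetical): an
 * mprotect-like path validates the range and then rewrites the PAGE_*
 * bits for every page it covers.
 */
static inline bool example_make_range_rw(target_ulong start, target_ulong len)
{
    if (page_check_range(start, len, 0) < 0) {
        return false;    /* some page in the range is unmapped */
    }
    page_set_flags(start & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(start + len),
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    return true;
}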

/**
 * page_alloc_target_data(address, size)
 * @address: guest virtual address
 * @size: size of data to allocate
 *
 * Allocate @size bytes of out-of-band data to associate with the
 * guest page at @address.  If the page is not mapped, NULL will
 * be returned.  If there is existing data associated with @address,
 * no new memory will be allocated.
 *
 * The memory will be freed when the guest page is deallocated,
 * e.g. with the munmap system call.
 */
void *page_alloc_target_data(target_ulong address, size_t size);

/**
 * page_get_target_data(address)
 * @address: guest virtual address
 *
 * Return any out-of-band memory associated with the guest page
 * at @address, as allocated by page_alloc_target_data().
 */
void *page_get_target_data(target_ulong address);
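
/*
 * Example (illustrative only; the name and the 16-byte granule are
 * hypothetical): a target that keeps one metadata byte per 16 bytes of
 * guest memory could lazily attach its per-page store like so.
 */
static inline uint8_t *example_get_page_metadata(target_ulong addr)
{
    uint8_t *data = page_get_target_data(addr);

    if (data == NULL) {
        /* First touch: allocate one byte per 16-byte granule.  */
        data = page_alloc_target_data(addr, TARGET_PAGE_SIZE / 16);
    }
    return data;
}
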
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
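
/*
 * Example (illustrative only; the helper name is hypothetical): device
 * models assert and deassert these bits in CPUState::interrupt_request
 * via cpu_interrupt()/cpu_reset_interrupt() from hw/core/cpu.h.
 */
static inline void example_set_hard_irq(CPUState *cs, bool level)
{
    if (level) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);        /* raise */
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);  /* lower */
    }
}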

#ifdef CONFIG_USER_ONLY

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            0
#define TLB_WATCHPOINT      0

#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * The flags all must be between TARGET_PAGE_BITS and
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if writes through this TLB entry are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))

/* Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)

/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
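
/*
 * Example (illustrative only; the helper name is hypothetical): the
 * softmmu fast path first checks for a page match with tlb_hit(), then
 * inspects the remaining low bits to see whether a slow path (MMIO,
 * watchpoint, ...) is required.
 */
static inline bool example_fast_read_ok(const CPUTLBEntry *entry,
                                        target_ulong addr)
{
    return tlb_hit(entry->addr_read, addr)
           && !(entry->addr_read & TLB_FLAGS_MASK);
}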

#ifdef CONFIG_TCG
/* accel/tcg/cpu-exec.c */
void dump_drift_info(void);
/* accel/tcg/translate-all.c */
void dump_exec_info(void);
void dump_opcount_info(void);
#endif /* CONFIG_TCG */

#endif /* !CONFIG_USER_ONLY */

#ifdef CONFIG_TCG
/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);
void tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
#endif /* CONFIG_TCG */

/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write);
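
/*
 * Example (illustrative only; the helper name is hypothetical): a
 * debugger-style read of a 32-bit word from guest memory.  The bytes
 * arrive in target byte order, so convert them before use on the host.
 */
static inline bool example_debug_read_u32(CPUState *cpu, target_ulong addr,
                                          uint32_t *out)
{
    uint32_t buf;

    if (cpu_memory_rw_debug(cpu, addr, &buf, sizeof(buf), false) < 0) {
        return false;       /* address not accessible */
    }
    *out = tswap32(buf);    /* target byte order -> host byte order */
    return true;
}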

/**
 * cpu_set_cpustate_pointers(cpu)
 * @cpu: The cpu object
 *
 * Set the generic pointers in CPUState into the outer object.
 */
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
    cpu->parent_obj.env_ptr = &cpu->env;
    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}
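
/*
 * Example (illustrative only; all "foo" names are hypothetical): each
 * target calls this from its instance_init so that generic code can
 * reach env and icount_decr without knowing the ArchCPU layout:
 *
 *     static void foo_cpu_initfn(Object *obj)
 *     {
 *         FooCPU *cpu = FOO_CPU(obj);
 *
 *         cpu_set_cpustate_pointers(cpu);
 *     }
 */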

/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU associated with the environment.
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return container_of(env, ArchCPU, env);
}

/**
 * env_cpu(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline CPUState *env_cpu(CPUArchState *env)
{
    return &env_archcpu(env)->parent_obj;
}

/**
 * env_neg(env)
 * @env: The architecture environment
 *
 * Return the CPUNegativeOffsetState associated with the environment.
 */
static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
{
    ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
    return &arch_cpu->neg;
}

/**
 * cpu_neg(cpu)
 * @cpu: The generic CPUState
 *
 * Return the CPUNegativeOffsetState associated with the cpu.
 */
static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
{
    ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
    return &arch_cpu->neg;
}

/**
 * env_tlb(env)
 * @env: The architecture environment
 *
 * Return the CPUTLB state associated with the environment.
 */
static inline CPUTLB *env_tlb(CPUArchState *env)
{
    return &env_neg(env)->tlb;
}
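
/*
 * Example (illustrative only; the helper name is hypothetical): helper
 * code often holds only a CPUArchState and uses the accessors above to
 * reach the other views of the same underlying object.
 */
static inline void example_halt_self(CPUArchState *env)
{
    CPUState *cs = env_cpu(env);    /* generic view of the same CPU */

    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
}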

#endif /* CPU_ALL_H */