/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/rcu.h"

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */

/* some important defines:
 *
 * HOST_BIG_ENDIAN : whether the host cpu is big endian
 * (otherwise it is little endian).
 *
 * TARGET_BIG_ENDIAN : same for the target cpu
 */

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
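
/*
 * Illustrative sketch: tswapl() converts a target_long-sized value between
 * host and guest byte order.  It byte-swaps only when BSWAP_NEEDED is
 * defined, i.e. when host and target endianness differ; otherwise the value
 * passes through unchanged.  For a little-endian host running a big-endian
 * 32-bit target:
 *
 *     target_ulong v = tswapl(0x12345678);    // yields 0x78563412
 */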

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
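
/*
 * Usage sketch (illustrative): the *_p accessors read and write a host
 * buffer in the *target's* byte order, so stored values round-trip
 * regardless of host endianness.  Assuming a small scratch buffer:
 *
 *     uint8_t buf[4];
 *     stl_p(buf, 0xdeadbeef);      // stored in target byte order
 *     uint32_t v = ldl_p(buf);     // v == 0xdeadbeef on any host
 */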

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"
#include "exec/user/guest-base.h"

extern bool have_guest_base;

/*
 * If non-zero, the guest virtual address space is a contiguous subset
 * of the host virtual address space, i.e. '-R reserved_va' is in effect
 * either from the command-line or by default.  The value is the last
 * byte of the guest address space e.g. UINT32_MAX.
 *
 * If zero, the host and guest virtual address spaces are intermingled.
 */
extern unsigned long reserved_va;

/*
 * Limit the guest addresses as best we can.
 *
 * When not using -R reserved_va, we cannot really limit the guest
 * to less address space than the host.  For 32-bit guests, this
 * acts as a sanity check that we're not giving the guest an address
 * that it cannot even represent.  For 64-bit guests... the address
 * might not be what the real kernel would give, but it is at least
 * representable in the guest.
 *
 * TODO: Improve address allocation to avoid this problem, and to
 * avoid setting bits at the top of guest addresses that might need
 * to be used for tags.
 */
#define GUEST_ADDR_MAX_ \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX    (reserved_va ? : GUEST_ADDR_MAX_)
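
/*
 * Note (illustrative): the GNU "?:" form above makes GUEST_ADDR_MAX evaluate
 * to reserved_va when a reserved region is in effect, and to the compile-time
 * GUEST_ADDR_MAX_ limit otherwise.  A typical sanity check might look like
 * (guest_addr is a placeholder):
 *
 *     if (guest_addr > GUEST_ADDR_MAX) {
 *         // address not representable for this guest
 *     }
 */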

#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif
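
/*
 * Illustrative sketch: the template includes above expand into the physical
 * memory accessors, e.g. ldl_phys()/stl_phys() taking an AddressSpace and
 * the "_cached" variants taking a MemoryRegionCache.  System-mode code might
 * use them roughly like this (as and addr are placeholders):
 *
 *     uint32_t v = ldl_phys(as, addr);    // read guest physical memory
 *     stl_phys(as, addr + 4, v);          // write it back, target endian
 */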

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS   ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                              (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS   target_page.bits
#define TARGET_PAGE_MASK   ((target_long)target_page.mask)
#endif
#define TARGET_PAGE_SIZE   (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE   (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK   ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
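
/*
 * Illustrative sketch: with 4 KiB target pages (TARGET_PAGE_BITS == 12) the
 * macros above relate as follows:
 *
 *     TARGET_PAGE_SIZE         == 0x1000
 *     addr & ~TARGET_PAGE_MASK    // offset within the page
 *     addr & TARGET_PAGE_MASK     // start of the containing page
 *     TARGET_PAGE_ALIGN(addr)     // round addr up to the next page boundary
 */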

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/*
 * Original state of the write flag (used when tracking self-modifying code)
 */
#define PAGE_WRITE_ORG 0x0010
/*
 * Invalidate the TLB entry immediately, helpful for s390x
 * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
 */
#define PAGE_WRITE_INV 0x0020
/* For use with page_set_flags: page is being replaced; target_data cleared. */
#define PAGE_RESET     0x0040
/* For linux-user, indicates that the page is MAP_ANON. */
#define PAGE_ANON      0x0080

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED  0x0100
#endif
/* Target-specific bits that will be used via page_get_flags(). */
#define PAGE_TARGET_1  0x0200
#define PAGE_TARGET_2  0x0400

/*
 * For linux-user, indicates that the page is mapped with the same semantics
 * in both guest and host.
 */
#define PAGE_PASSTHROUGH 0x0800

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);

/**
 * page_check_range
 * @start: first byte of range
 * @len: length of range
 * @flags: flags required for each page
 *
 * Return true if every page in [@start, @start+@len) has @flags set.
 * Return false if any page is unmapped.  Thus testing flags == 0 is
 * equivalent to testing for flags == PAGE_VALID.
 */
bool page_check_range(target_ulong start, target_ulong len, int flags);
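
/*
 * Usage sketch (illustrative; start and size are placeholders): user-only
 * code commonly validates a guest buffer before touching it:
 *
 *     if (!page_check_range(start, size, PAGE_READ)) {
 *         return -TARGET_EFAULT;            // unmapped or not readable
 *     }
 *     int flags = page_get_flags(start);    // per-page PAGE_* bits
 */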

/**
 * page_check_range_empty:
 * @start: first byte of range
 * @last: last byte of range
 * Context: holding mmap lock
 *
 * Return true if the entire range [@start, @last] is unmapped.
 * The memory lock must be held so that the caller can ensure
 * the result stays true until a new mapping can be installed.
 */
bool page_check_range_empty(target_ulong start, target_ulong last);

/**
 * page_find_range_empty
 * @min: first byte of search range
 * @max: last byte of search range
 * @len: size of the hole required
 * @align: alignment of the hole required (power of 2)
 *
 * If there is a range [x, x+@len) within [@min, @max] such that
 * x % @align == 0, then return x.  Otherwise return -1.
 * The memory lock must be held, as the caller will want to ensure
 * the returned range stays empty until a new mapping can be installed.
 */
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align);

/**
 * page_get_target_data(address)
 * @address: guest virtual address
 *
 * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
 * with the guest page at @address, allocating it if necessary.  The
 * caller should already have verified that the address is valid.
 *
 * The memory will be freed when the guest page is deallocated,
 * e.g. with the munmap system call.
 */
void *page_get_target_data(target_ulong address)
    __attribute__((returns_nonnull));
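
/*
 * Illustrative sketch: a target that keeps per-page metadata (e.g. memory
 * tags) can fetch the page's out-of-band block and treat it as a
 * target-defined structure:
 *
 *     void *data = page_get_target_data(addr & TARGET_PAGE_MASK);
 *     memset(data, 0, TARGET_PAGE_DATA_SIZE);    // e.g. clear the metadata
 */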
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
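
/*
 * Illustrative sketch: a device model or interrupt controller typically
 * raises and lowers one of these bits through the CPUState API declared in
 * hw/core/cpu.h (cs is a placeholder CPUState pointer):
 *
 *     cpu_interrupt(cs, CPU_INTERRUPT_HARD);          // request a hardware irq
 *     cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);    // deassert it again
 */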

#ifdef CONFIG_USER_ONLY

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * The flags all must be between TARGET_PAGE_BITS and
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if writes through the TLB entry are ignored (discarded).  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull.  */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);

/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
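
/*
 * Illustrative sketch: a software TLB lookup compares the page-aligned guest
 * address against the cached comparator for the access type; a set
 * TLB_INVALID_MASK bit makes the comparison fail, so invalid entries never
 * hit (entry and addr are placeholders):
 *
 *     CPUTLBEntry *entry = ...;    // found by the caller's index/hash lookup
 *     if (tlb_hit(entry->addr_read, addr)) {
 *         // fast path: translate addr through this entry
 *     }
 */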

#ifdef CONFIG_TCG
/* accel/tcg/translate-all.c */
void dump_exec_info(GString *buf);
#endif /* CONFIG_TCG */

#endif /* !CONFIG_USER_ONLY */

/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);

/**
 * cpu_set_cpustate_pointers(cpu)
 * @cpu: The cpu object
 *
 * Set the generic pointers in CPUState into the outer object.
 */
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
}

/* Validate correct placement of CPUArchState. */
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));

/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU associated with the environment.
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return (void *)env - sizeof(CPUState);
}

/**
 * env_cpu(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline CPUState *env_cpu(CPUArchState *env)
{
    return (void *)env - sizeof(CPUState);
}

/**
 * env_neg(env)
 * @env: The architecture environment
 *
 * Return the CPUNegativeOffsetState associated with the environment.
 */
static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
{
    return &env_cpu(env)->neg;
}

/**
 * env_tlb(env)
 * @env: The architecture environment
 *
 * Return the CPUTLB state associated with the environment.
 */
static inline CPUTLB *env_tlb(CPUArchState *env)
{
    return &env_neg(env)->tlb;
}
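
/*
 * Illustrative sketch: because env immediately follows the CPUState inside
 * ArchCPU (validated by the QEMU_BUILD_BUG_ONs above), these helpers reduce
 * to simple pointer arithmetic.  Typical use in target code:
 *
 *     CPUState *cs = env_cpu(env);        // generic CPU from the env
 *     ArchCPU *cpu = env_archcpu(env);    // target CPU; &cpu->env == env
 *     CPUTLB *tlb = env_tlb(env);         // softmmu TLB state
 */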

#endif /* CPU_ALL_H */