/* QEMU cpu-common.h — git-blame extraction; per-line commit annotations follow. */
1ad2134f | 1 | #ifndef CPU_COMMON_H |
175de524 | 2 | #define CPU_COMMON_H |
1ad2134f | 3 | |
07f35073 | 4 | /* CPU interfaces that are target independent. */ |
1ad2134f | 5 | |
ce927ed9 | 6 | #ifndef CONFIG_USER_ONLY |
022c62cb | 7 | #include "exec/hwaddr.h" |
ce927ed9 | 8 | #endif |
37b76cfd | 9 | |
0ac20318 | 10 | /* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */ |
267f685b PB |
11 | void qemu_init_cpu_list(void); |
12 | void cpu_list_lock(void); | |
13 | void cpu_list_unlock(void); | |
14 | ||
2cd53943 TH |
15 | void tcg_flush_softmmu_tlb(CPUState *cs); |
16 | ||
b3755a91 PB |
17 | #if !defined(CONFIG_USER_ONLY) |
18 | ||
/*
 * Endianness with which a device's registers/MMIO are declared.
 * NATIVE means "same as the target CPU" — no byte swapping is needed;
 * BIG/LITTLE request an explicit fixed byte order.
 */
enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
24 | ||
/* DEVICE_HOST_ENDIAN: the device_endian value that matches the byte
 * order of the build host, selected at compile time. */
#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif
30 | ||
1ad2134f | 31 | /* address in the RAM (different from a physical address) */ |
#if defined(CONFIG_XEN_BACKEND)
/* Force a 64-bit ram_addr_t for the Xen backend — presumably so guest
 * RAM offsets beyond the host pointer range can be represented on
 * 32-bit hosts (NOTE(review): rationale inferred, confirm upstream). */
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
/* Otherwise a host-pointer-sized offset is sufficient. */
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
1ad2134f | 41 | |
96d0e26c WG |
42 | extern ram_addr_t ram_size; |
43 | ||
1ad2134f PB |
44 | /* memory API */ |
45 | ||
a8170e5e AK |
46 | typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value); |
47 | typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr); | |
1ad2134f | 48 | |
cd19cfa2 | 49 | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length); |
1ad2134f | 50 | /* This should not be used by devices. */ |
07bdaa41 | 51 | ram_addr_t qemu_ram_addr_from_host(void *ptr); |
e3dd7493 | 52 | RAMBlock *qemu_ram_block_by_name(const char *name); |
422148d3 | 53 | RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, |
f615f396 | 54 | ram_addr_t *offset); |
f90bb71b | 55 | ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host); |
fa53a0e5 GA |
56 | void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev); |
57 | void qemu_ram_unset_idstr(RAMBlock *block); | |
422148d3 | 58 | const char *qemu_ram_get_idstr(RAMBlock *rb); |
754cb9c0 YK |
59 | void *qemu_ram_get_host_addr(RAMBlock *rb); |
60 | ram_addr_t qemu_ram_get_offset(RAMBlock *rb); | |
61 | ram_addr_t qemu_ram_get_used_length(RAMBlock *rb); | |
463a4ac2 | 62 | bool qemu_ram_is_shared(RAMBlock *rb); |
2ce16640 DDAG |
63 | bool qemu_ram_is_uf_zeroable(RAMBlock *rb); |
64 | void qemu_ram_set_uf_zeroable(RAMBlock *rb); | |
b895de50 CLG |
65 | bool qemu_ram_is_migratable(RAMBlock *rb); |
66 | void qemu_ram_set_migratable(RAMBlock *rb); | |
67 | void qemu_ram_unset_migratable(RAMBlock *rb); | |
2ce16640 | 68 | |
863e9621 | 69 | size_t qemu_ram_pagesize(RAMBlock *block); |
67f11b5c | 70 | size_t qemu_ram_pagesize_largest(void); |
1ad2134f | 71 | |
a8170e5e | 72 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
0c249ff7 | 73 | hwaddr len, int is_write); |
/*
 * cpu_physical_memory_read: copy @len bytes from guest physical
 * address @addr into @buf.
 *
 * Thin convenience wrapper around cpu_physical_memory_rw() with
 * is_write = 0 (a read, per the is_write parameter of the prototype
 * above).
 */
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
/*
 * cpu_physical_memory_write: copy @len bytes from @buf to guest
 * physical address @addr.
 *
 * Wrapper around cpu_physical_memory_rw() with is_write = 1.  The
 * (void *) cast drops the const qualifier only because the
 * cpu_physical_memory_rw() prototype takes a non-const buffer; the
 * buffer is the data source here.
 */
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
a8170e5e AK |
84 | void *cpu_physical_memory_map(hwaddr addr, |
85 | hwaddr *plen, | |
1ad2134f | 86 | int is_write); |
a8170e5e AK |
87 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
88 | int is_write, hwaddr access_len); | |
e95205e1 FZ |
89 | void cpu_register_map_client(QEMUBH *bh); |
90 | void cpu_unregister_map_client(QEMUBH *bh); | |
1ad2134f | 91 | |
a8170e5e | 92 | bool cpu_physical_memory_is_io(hwaddr phys_addr); |
76f35538 | 93 | |
6842a08e BS |
94 | /* Coalesced MMIO regions are areas where write operations can be reordered. |
95 | * This usually implies that write operations are side-effect free. This allows | |
96 | * batching which can make a major impact on performance when using | |
97 | * virtualization. | |
98 | */ | |
6842a08e BS |
99 | void qemu_flush_coalesced_mmio_buffer(void); |
100 | ||
0c249ff7 | 101 | void cpu_flush_icache_range(hwaddr start, hwaddr len); |
1ad2134f | 102 | |
754cb9c0 | 103 | typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque); |
bd2fa51f | 104 | |
e3807054 | 105 | int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque); |
d3a5038c | 106 | int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length); |
bd2fa51f | 107 | |
b3755a91 PB |
108 | #endif |
109 | ||
175de524 | 110 | #endif /* CPU_COMMON_H */ |