/*
 * cpu-common.h — CPU interfaces that are target independent (QEMU).
 * (Recovered from a git-blame web view; blame metadata removed.)
 */
1#ifndef CPU_COMMON_H
2#define CPU_COMMON_H 1
3
07f35073 4/* CPU interfaces that are target independent. */
1ad2134f 5
37b76cfd
PB
6#ifdef TARGET_PHYS_ADDR_BITS
7#include "targphys.h"
8#endif
9
10#ifndef NEED_CPU_H
11#include "poison.h"
12#endif
13
1ad2134f 14#include "bswap.h"
f6f3fbca 15#include "qemu-queue.h"
1ad2134f 16
b3755a91
PB
17#if !defined(CONFIG_USER_ONLY)
18
dd310534
AG
/* Endianness used by a device's MMIO handlers.  NATIVE means the
 * handlers use the target CPU's byte order; BIG/LITTLE fix the order
 * explicitly regardless of the target. */
enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
24
1ad2134f 25/* address in the RAM (different from a physical address) */
f15fbc4b
AP
26#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
27typedef uint64_t ram_addr_t;
28# define RAM_ADDR_MAX UINT64_MAX
29# define RAM_ADDR_FMT "%" PRIx64
30#else
c227f099 31typedef unsigned long ram_addr_t;
f15fbc4b
AP
32# define RAM_ADDR_MAX ULONG_MAX
33# define RAM_ADDR_FMT "%lx"
34#endif
1ad2134f
PB
35
36/* memory API */
37
c227f099
AL
38typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
39typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
1ad2134f 40
0fd542fb
MT
41void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
42 ram_addr_t size,
43 ram_addr_t phys_offset,
44 ram_addr_t region_offset,
45 bool log_dirty);
46
47static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
48 ram_addr_t size,
49 ram_addr_t phys_offset,
50 ram_addr_t region_offset)
51{
52 cpu_register_physical_memory_log(start_addr, size, phys_offset,
53 region_offset, false);
54}
55
c227f099
AL
56static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
57 ram_addr_t size,
58 ram_addr_t phys_offset)
1ad2134f
PB
59{
60 cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
61}
62
c227f099 63ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
fce537d4 64struct MemoryRegion;
84b89d78 65ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
fce537d4
AK
66 ram_addr_t size, void *host,
67 struct MemoryRegion *mr);
68ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size,
69 struct MemoryRegion *mr);
c227f099 70void qemu_ram_free(ram_addr_t addr);
1f2e98b6 71void qemu_ram_free_from_ptr(ram_addr_t addr);
cd19cfa2 72void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
1ad2134f 73/* This should only be used for ram local to a device. */
c227f099 74void *qemu_get_ram_ptr(ram_addr_t addr);
8ab934f9 75void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
b2e0a138
MT
76/* Same but slower, to use for migration, where the order of
77 * RAMBlocks must not change. */
78void *qemu_safe_ram_ptr(ram_addr_t addr);
050a0ddf 79void qemu_put_ram_ptr(void *addr);
1ad2134f 80/* This should not be used by devices. */
e890261f
MT
81int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
82ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
1ad2134f 83
d60efc6b
BS
/* Register MMIO handler tables and return an io-memory index for use with
 * the physical memory registration calls.  mem_read/mem_write are arrays
 * of handler pointers (indexed by access size — TODO confirm at callers);
 * 'endian' selects the byte order the handlers expect. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);
88
c227f099 89void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1ad2134f 90 int len, int is_write);
c227f099 91static inline void cpu_physical_memory_read(target_phys_addr_t addr,
3bad9814 92 void *buf, int len)
1ad2134f
PB
93{
94 cpu_physical_memory_rw(addr, buf, len, 0);
95}
c227f099 96static inline void cpu_physical_memory_write(target_phys_addr_t addr,
3bad9814 97 const void *buf, int len)
1ad2134f 98{
3bad9814 99 cpu_physical_memory_rw(addr, (void *)buf, len, 1);
1ad2134f 100}
c227f099
AL
101void *cpu_physical_memory_map(target_phys_addr_t addr,
102 target_phys_addr_t *plen,
1ad2134f 103 int is_write);
c227f099
AL
104void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
105 int is_write, target_phys_addr_t access_len);
1ad2134f
PB
106void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
107void cpu_unregister_map_client(void *cookie);
108
f6f3fbca
MT
/* Client notified of physical memory map changes and dirty-logging
 * events; registered via cpu_register_phys_memory_client() and kept on
 * a QLIST. */
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    /* The range [start_addr, start_addr + size) is (re)mapped to
     * phys_offset; log_dirty mirrors the argument of the same name to
     * cpu_register_physical_memory_log(). */
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    /* Synchronize the dirty bitmap for the given range; int status
     * (presumably 0/negative by QEMU convention — confirm at callers). */
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    /* Enable (non-zero) or disable migration logging. */
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    /* Start/stop dirty logging on [phys_addr, phys_addr + size). */
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    /* Linkage on the global list of registered clients. */
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};
128
129void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
130void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
131
6842a08e
BS
132/* Coalesced MMIO regions are areas where write operations can be reordered.
133 * This usually implies that write operations are side-effect free. This allows
134 * batching which can make a major impact on performance when using
135 * virtualization.
136 */
137void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
138
139void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
140
141void qemu_flush_coalesced_mmio_buffer(void);
142
c227f099 143uint32_t ldub_phys(target_phys_addr_t addr);
1e78bcc1
AG
144uint32_t lduw_le_phys(target_phys_addr_t addr);
145uint32_t lduw_be_phys(target_phys_addr_t addr);
1e78bcc1
AG
146uint32_t ldl_le_phys(target_phys_addr_t addr);
147uint32_t ldl_be_phys(target_phys_addr_t addr);
1e78bcc1
AG
148uint64_t ldq_le_phys(target_phys_addr_t addr);
149uint64_t ldq_be_phys(target_phys_addr_t addr);
c227f099 150void stb_phys(target_phys_addr_t addr, uint32_t val);
1e78bcc1
AG
151void stw_le_phys(target_phys_addr_t addr, uint32_t val);
152void stw_be_phys(target_phys_addr_t addr, uint32_t val);
1e78bcc1
AG
153void stl_le_phys(target_phys_addr_t addr, uint32_t val);
154void stl_be_phys(target_phys_addr_t addr, uint32_t val);
1e78bcc1
AG
155void stq_le_phys(target_phys_addr_t addr, uint64_t val);
156void stq_be_phys(target_phys_addr_t addr, uint64_t val);
c227f099 157
21673cde
BS
158#ifdef NEED_CPU_H
159uint32_t lduw_phys(target_phys_addr_t addr);
160uint32_t ldl_phys(target_phys_addr_t addr);
161uint64_t ldq_phys(target_phys_addr_t addr);
162void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
163void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
164void stw_phys(target_phys_addr_t addr, uint32_t val);
165void stl_phys(target_phys_addr_t addr, uint32_t val);
166void stq_phys(target_phys_addr_t addr, uint64_t val);
167#endif
168
c227f099 169void cpu_physical_memory_write_rom(target_phys_addr_t addr,
1ad2134f
PB
170 const uint8_t *buf, int len);
171
/* Built-in io-memory slot indices.  An index is stored shifted left by
 * IO_MEM_SHIFT; the low bits then appear free for the flag values below
 * (ROMD/SUBPAGE are both < 1 << IO_MEM_SHIFT) — confirm in exec.c. */
#define IO_MEM_SHIFT 3

#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
#define IO_MEM_SUBPAGE_RAM (4 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
1ad2134f 183
b3755a91
PB
184#endif
185
1ad2134f 186#endif /* !CPU_COMMON_H */