/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

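/* Note on the structure below (inferred from the code that follows): the
 * dispatch map is a radix tree with P_L2_LEVELS levels of L2_SIZE-entry
 * nodes.  Nodes are named by 15-bit indices into the flat phys_map_nodes
 * array instead of by pointers, which keeps each PhysPageEntry down to
 * 16 bits; PHYS_MAP_NODE_NIL marks an absent child.  Nodes are only ever
 * allocated, never freed individually; phys_map_nodes_reset() drops the
 * whole pool at once. */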
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

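/* Populate the page range [*index, *index + *nb) with 'leaf', working one
 * level at a time.  Whenever the remaining range is step-aligned and at
 * least 'step' pages long, a single entry at this level covers
 * 2^(level * L2_BITS) pages at once; otherwise the entry is refined by
 * recursing one level down.  E.g. if L2_BITS were 10, a 1025-page run at
 * page 0 would become one level-1 leaf for the first 1024 pages plus one
 * level-0 leaf for the remainder. */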
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

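/* Walk the radix tree from the root, consuming L2_BITS of the page index
 * per level.  A missing interior node means nothing was ever mapped
 * there, so the lookup falls back to the unassigned section; a leaf may
 * appear at any level, standing for a naturally aligned power-of-two run
 * of pages that all map to the same section. */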
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

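/* Translate an address-space offset into a section, an offset within the
 * section's MemoryRegion (*xlat), and a clamped length: *plen is shrunk
 * so the returned range never extends past the end of the region.  The
 * Int128 arithmetic avoids overflow for regions whose size does not fit
 * in 64 bits. */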
static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
                                             hwaddr *xlat, hwaddr *plen,
                                             bool is_write)
{
    return address_space_translate_internal(as, addr, xlat, plen, true);
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    return address_space_translate_internal(as, addr, xlat, plen, false);
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
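/* Watchpoints cover a naturally aligned power-of-two range described by a
 * single mask: for len == 4, len_mask == ~3, so "addr & ~len_mask" below
 * checks alignment, and check_watchpoint() can later compare an access
 * against the watched range with one AND. */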
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

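/* Compute the value stored in a TLB entry's IO-TLB field.  For RAM the
 * value is the page-aligned ram_addr_t of the page, ORed with the
 * notdirty or ROM section number (which fits in the sub-page bits); for
 * MMIO it is the section index plus the offset within the section.
 * This encoding is why phys_section_add() asserts that section numbers
 * stay below TARGET_PAGE_SIZE. */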
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

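/* A memory region smaller than a target page, or not page-aligned, cannot
 * get its own dispatch leaf.  Instead the containing page is handed to a
 * subpage_t, whose sub_section[] maps each byte offset within the page to
 * a section number, so several regions can share one page. */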
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}

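/* MemoryListener callback: split an incoming section into an unaligned
 * head, zero or more whole target pages, and an unaligned tail.  Head and
 * tail go through register_subpage(); whole pages whose offset within the
 * region is page-aligned are registered in bulk as a multipage run. */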
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

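/* Back a RAMBlock with a file on a hugetlbfs mount: create an unlinked
 * temporary file under the -mem-path directory, size it with ftruncate(),
 * and mmap() it.  Returns NULL on any failure so the caller can fall back
 * to anonymous memory. */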
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

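/* Best-fit allocator for ram_addr_t space: for every existing block,
 * compute the gap between its end and the start of the nearest block
 * after it, and hand out the smallest gap that still fits the request. */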
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

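/* Allocate (or adopt, when 'host' is non-NULL) a block of guest RAM and
 * register it in ram_list.  The backing store depends on configuration:
 * a hugetlbfs file under -mem-path, Xen, KVM on s390, or plain anonymous
 * memory.  The block list is kept sorted from biggest to smallest,
 * presumably because lookups hit the biggest blocks (main RAM) most
 * often. */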
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

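/* Writes to pages whose code-dirty flag is clear are routed here via the
 * TLB instead of going straight to RAM, so any translated code derived
 * from the written page can be invalidated before the store lands.  Once
 * every dirty flag for the page is set again there is nothing left to
 * track, and the TLB entry is switched back to a plain RAM mapping. */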
a8170e5e | 1449 | static void notdirty_mem_write(void *opaque, hwaddr ram_addr, |
0e0df1e2 | 1450 | uint64_t val, unsigned size) |
9fa3e853 | 1451 | { |
3a7d929e | 1452 | int dirty_flags; |
f7c11b53 | 1453 | dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
3a7d929e | 1454 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { |
0e0df1e2 | 1455 | tb_invalidate_phys_page_fast(ram_addr, size); |
f7c11b53 | 1456 | dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
3a7d929e | 1457 | } |
0e0df1e2 AK |
1458 | switch (size) { |
1459 | case 1: | |
1460 | stb_p(qemu_get_ram_ptr(ram_addr), val); | |
1461 | break; | |
1462 | case 2: | |
1463 | stw_p(qemu_get_ram_ptr(ram_addr), val); | |
1464 | break; | |
1465 | case 4: | |
1466 | stl_p(qemu_get_ram_ptr(ram_addr), val); | |
1467 | break; | |
1468 | default: | |
1469 | abort(); | |
3a7d929e | 1470 | } |
f23db169 | 1471 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
f7c11b53 | 1472 | cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); |
f23db169 FB |
1473 | /* we remove the notdirty callback only if the code has been |
1474 | flushed */ | |
1475 | if (dirty_flags == 0xff) | |
2e70f6ef | 1476 | tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr); |
9fa3e853 FB |
1477 | } |
1478 | ||
b018ddf6 PB |
1479 | static bool notdirty_mem_accepts(void *opaque, hwaddr addr, |
1480 | unsigned size, bool is_write) | |
1481 | { | |
1482 | return is_write; | |
1483 | } | |
1484 | ||
0e0df1e2 | 1485 | static const MemoryRegionOps notdirty_mem_ops = { |
0e0df1e2 | 1486 | .write = notdirty_mem_write, |
b018ddf6 | 1487 | .valid.accepts = notdirty_mem_accepts, |
0e0df1e2 | 1488 | .endianness = DEVICE_NATIVE_ENDIAN, |
1ccde1cb FB |
1489 | }; |
1490 | ||
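For orientation, a descriptive note on the mechanism above (added commentary, not from the file): pages that contain translated code are entered in the TLB with io_mem_notdirty instead of plain RAM, so the first store to such a page traps into notdirty_mem_write(), which invalidates the TBs on the page, performs the store, sets the dirty bits, and, once the page is fully dirty (0xff), calls tlb_set_dirty() to restore direct RAM writes.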
0f459d16 | 1491 | /* Generate a debug exception if a watchpoint has been hit. */ |
b4051334 | 1492 | static void check_watchpoint(int offset, int len_mask, int flags) |
0f459d16 | 1493 | { |
9349b4f9 | 1494 | CPUArchState *env = cpu_single_env; |
06d55cc1 | 1495 | target_ulong pc, cs_base; |
0f459d16 | 1496 | target_ulong vaddr; |
a1d1bb31 | 1497 | CPUWatchpoint *wp; |
06d55cc1 | 1498 | int cpu_flags; |
0f459d16 | 1499 | |
06d55cc1 AL |
1500 | if (env->watchpoint_hit) { |
1501 | /* We re-entered the check after replacing the TB. Now raise | |
1502 | * the debug interrupt so that it will trigger after the | |
1503 | * current instruction. */ | |
c3affe56 | 1504 | cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG); |
06d55cc1 AL |
1505 | return; |
1506 | } | |
2e70f6ef | 1507 | vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; |
72cf2d4f | 1508 | QTAILQ_FOREACH(wp, &env->watchpoints, entry) { |
b4051334 AL |
1509 | if ((vaddr == (wp->vaddr & len_mask) || |
1510 | (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { | |
6e140f28 AL |
1511 | wp->flags |= BP_WATCHPOINT_HIT; |
1512 | if (!env->watchpoint_hit) { | |
1513 | env->watchpoint_hit = wp; | |
5a316526 | 1514 | tb_check_watchpoint(env); |
6e140f28 AL |
1515 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
1516 | env->exception_index = EXCP_DEBUG; | |
488d6577 | 1517 | cpu_loop_exit(env); |
6e140f28 AL |
1518 | } else { |
1519 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); | |
1520 | tb_gen_code(env, pc, cs_base, cpu_flags, 1); | |
488d6577 | 1521 | cpu_resume_from_signal(env, NULL); |
6e140f28 | 1522 | } |
06d55cc1 | 1523 | } |
6e140f28 AL |
1524 | } else { |
1525 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
0f459d16 PB |
1526 | } |
1527 | } | |
1528 | } | |
1529 | ||
6658ffb8 PB |
1530 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
1531 | so these check for a hit then pass through to the normal out-of-line | |
1532 | phys routines. */ | |
a8170e5e | 1533 | static uint64_t watch_mem_read(void *opaque, hwaddr addr, |
1ec9b909 | 1534 | unsigned size) |
6658ffb8 | 1535 | { |
1ec9b909 AK |
1536 | check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ); |
1537 | switch (size) { | |
1538 | case 1: return ldub_phys(addr); | |
1539 | case 2: return lduw_phys(addr); | |
1540 | case 4: return ldl_phys(addr); | |
1541 | default: abort(); | |
1542 | } | |
6658ffb8 PB |
1543 | } |
1544 | ||
a8170e5e | 1545 | static void watch_mem_write(void *opaque, hwaddr addr, |
1ec9b909 | 1546 | uint64_t val, unsigned size) |
6658ffb8 | 1547 | { |
1ec9b909 AK |
1548 | check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE); |
1549 | switch (size) { | |
67364150 MF |
1550 | case 1: |
1551 | stb_phys(addr, val); | |
1552 | break; | |
1553 | case 2: | |
1554 | stw_phys(addr, val); | |
1555 | break; | |
1556 | case 4: | |
1557 | stl_phys(addr, val); | |
1558 | break; | |
1ec9b909 AK |
1559 | default: abort(); |
1560 | } | |
6658ffb8 PB |
1561 | } |
1562 | ||
1ec9b909 AK |
1563 | static const MemoryRegionOps watch_mem_ops = { |
1564 | .read = watch_mem_read, | |
1565 | .write = watch_mem_write, | |
1566 | .endianness = DEVICE_NATIVE_ENDIAN, | |
6658ffb8 | 1567 | }; |
6658ffb8 | 1568 | |
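A hedged sketch of how these handlers are reached (the CPU pointer, address, and error handling are illustrative): a debugger-style caller installs a watchpoint with cpu_watchpoint_insert(), after which the TLB routes accesses to that page through io_mem_watch and hence through watch_mem_read()/watch_mem_write() above.

    /* Illustrative only: watch 4 bytes at 'vaddr' for guest writes. */
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp) < 0) {
        /* length/alignment not supported for this target */
    }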
a8170e5e | 1569 | static uint64_t subpage_read(void *opaque, hwaddr addr, |
70c68e44 | 1570 | unsigned len) |
db7b5426 | 1571 | { |
70c68e44 | 1572 | subpage_t *mmio = opaque; |
f6405247 | 1573 | unsigned int idx = SUBPAGE_IDX(addr); |
791af8c8 PB |
1574 | uint64_t val; |
1575 | ||
5312bd8b | 1576 | MemoryRegionSection *section; |
db7b5426 BS |
1577 | #if defined(DEBUG_SUBPAGE) |
1578 | printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, | |
1579 | mmio, len, addr, idx); | |
1580 | #endif | |
db7b5426 | 1581 | |
5312bd8b AK |
1582 | section = &phys_sections[mmio->sub_section[idx]]; |
1583 | addr += mmio->base; | |
1584 | addr -= section->offset_within_address_space; | |
1585 | addr += section->offset_within_region; | |
791af8c8 PB |
1586 | io_mem_read(section->mr, addr, &val, len); |
1587 | return val; | |
db7b5426 BS |
1588 | } |
1589 | ||
a8170e5e | 1590 | static void subpage_write(void *opaque, hwaddr addr, |
70c68e44 | 1591 | uint64_t value, unsigned len) |
db7b5426 | 1592 | { |
70c68e44 | 1593 | subpage_t *mmio = opaque; |
f6405247 | 1594 | unsigned int idx = SUBPAGE_IDX(addr); |
5312bd8b | 1595 | MemoryRegionSection *section; |
db7b5426 | 1596 | #if defined(DEBUG_SUBPAGE) |
70c68e44 AK |
1597 | printf("%s: subpage %p len %d addr " TARGET_FMT_plx |
1598 | " idx %d value %"PRIx64"\n", | |
f6405247 | 1599 | __func__, mmio, len, addr, idx, value); |
db7b5426 | 1600 | #endif |
f6405247 | 1601 | |
5312bd8b AK |
1602 | section = &phys_sections[mmio->sub_section[idx]]; |
1603 | addr += mmio->base; | |
1604 | addr -= section->offset_within_address_space; | |
1605 | addr += section->offset_within_region; | |
37ec01d4 | 1606 | io_mem_write(section->mr, addr, value, len); |
db7b5426 BS |
1607 | } |
1608 | ||
c353e4cc PB |
1609 | static bool subpage_accepts(void *opaque, hwaddr addr, |
1610 | unsigned size, bool is_write) | |
1611 | { | |
1612 | subpage_t *mmio = opaque; | |
1613 | unsigned int idx = SUBPAGE_IDX(addr); | |
1614 | MemoryRegionSection *section; | |
1615 | #if defined(DEBUG_SUBPAGE) | |
1616 | printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx | |
1617 | " idx %d\n", __func__, mmio, | |
1618 | is_write ? 'w' : 'r', size, addr, idx); | |
1619 | #endif | |
1620 | ||
1621 | section = &phys_sections[mmio->sub_section[idx]]; | |
1622 | addr += mmio->base; | |
1623 | addr -= section->offset_within_address_space; | |
1624 | addr += section->offset_within_region; | |
1625 | return memory_region_access_valid(section->mr, addr, size, is_write); | |
1626 | } | |
1627 | ||
70c68e44 AK |
1628 | static const MemoryRegionOps subpage_ops = { |
1629 | .read = subpage_read, | |
1630 | .write = subpage_write, | |
c353e4cc | 1631 | .valid.accepts = subpage_accepts, |
70c68e44 | 1632 | .endianness = DEVICE_NATIVE_ENDIAN, |
db7b5426 BS |
1633 | }; |
1634 | ||
a8170e5e | 1635 | static uint64_t subpage_ram_read(void *opaque, hwaddr addr, |
de712f94 | 1636 | unsigned size) |
56384e8b AF |
1637 | { |
1638 | ram_addr_t raddr = addr; | |
1639 | void *ptr = qemu_get_ram_ptr(raddr); | |
de712f94 AK |
1640 | switch (size) { |
1641 | case 1: return ldub_p(ptr); | |
1642 | case 2: return lduw_p(ptr); | |
1643 | case 4: return ldl_p(ptr); | |
1644 | default: abort(); | |
1645 | } | |
56384e8b AF |
1646 | } |
1647 | ||
a8170e5e | 1648 | static void subpage_ram_write(void *opaque, hwaddr addr, |
de712f94 | 1649 | uint64_t value, unsigned size) |
56384e8b AF |
1650 | { |
1651 | ram_addr_t raddr = addr; | |
1652 | void *ptr = qemu_get_ram_ptr(raddr); | |
de712f94 AK |
1653 | switch (size) { |
1654 | case 1: return stb_p(ptr, value); | |
1655 | case 2: return stw_p(ptr, value); | |
1656 | case 4: return stl_p(ptr, value); | |
1657 | default: abort(); | |
1658 | } | |
56384e8b AF |
1659 | } |
1660 | ||
de712f94 AK |
1661 | static const MemoryRegionOps subpage_ram_ops = { |
1662 | .read = subpage_ram_read, | |
1663 | .write = subpage_ram_write, | |
1664 | .endianness = DEVICE_NATIVE_ENDIAN, | |
56384e8b AF |
1665 | }; |
1666 | ||
c227f099 | 1667 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
5312bd8b | 1668 | uint16_t section) |
db7b5426 BS |
1669 | { |
1670 | int idx, eidx; | |
1671 | ||
1672 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) | |
1673 | return -1; | |
1674 | idx = SUBPAGE_IDX(start); | |
1675 | eidx = SUBPAGE_IDX(end); | |
1676 | #if defined(DEBUG_SUBPAGE) | |
0bf9e31a | 1677 | printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__, |
db7b5426 BS |
1678 | mmio, start, end, idx, eidx, section); |
1679 | #endif | |
5312bd8b AK |
1680 | if (memory_region_is_ram(phys_sections[section].mr)) { |
1681 | MemoryRegionSection new_section = phys_sections[section]; | |
1682 | new_section.mr = &io_mem_subpage_ram; | |
1683 | section = phys_section_add(&new_section); | |
56384e8b | 1684 | } |
db7b5426 | 1685 | for (; idx <= eidx; idx++) { |
5312bd8b | 1686 | mmio->sub_section[idx] = section; |
db7b5426 BS |
1687 | } |
1688 | ||
1689 | return 0; | |
1690 | } | |
1691 | ||
a8170e5e | 1692 | static subpage_t *subpage_init(hwaddr base) |
db7b5426 | 1693 | { |
c227f099 | 1694 | subpage_t *mmio; |
db7b5426 | 1695 | |
7267c094 | 1696 | mmio = g_malloc0(sizeof(subpage_t)); |
1eec614b AL |
1697 | |
1698 | mmio->base = base; | |
70c68e44 AK |
1699 | memory_region_init_io(&mmio->iomem, &subpage_ops, mmio, |
1700 | "subpage", TARGET_PAGE_SIZE); | |
b3b00c78 | 1701 | mmio->iomem.subpage = true; |
db7b5426 | 1702 | #if defined(DEBUG_SUBPAGE) |
1eec614b AL |
1703 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, |
1704 | mmio, base, TARGET_PAGE_SIZE); | |
db7b5426 | 1705 | #endif |
0f0cb164 | 1706 | subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned); |
db7b5426 BS |
1707 | |
1708 | return mmio; | |
1709 | } | |
1710 | ||
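To make the dispatch concrete, a hedged sketch (the base and the two section indices are invented): a page whose first half is RAM and second half is MMIO gets a single subpage_t, with each half registered against its own section.

    /* Illustrative only: split one guest page between two sections. */
    subpage_t *sp = subpage_init(base & TARGET_PAGE_MASK);
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, ram_section_idx);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     mmio_section_idx);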
5312bd8b AK |
1711 | static uint16_t dummy_section(MemoryRegion *mr) |
1712 | { | |
1713 | MemoryRegionSection section = { | |
1714 | .mr = mr, | |
1715 | .offset_within_address_space = 0, | |
1716 | .offset_within_region = 0, | |
1717 | .size = UINT64_MAX, | |
1718 | }; | |
1719 | ||
1720 | return phys_section_add(§ion); | |
1721 | } | |
1722 | ||
a8170e5e | 1723 | MemoryRegion *iotlb_to_region(hwaddr index) |
aa102231 | 1724 | { |
37ec01d4 | 1725 | return phys_sections[index & ~TARGET_PAGE_MASK].mr; |
aa102231 AK |
1726 | } |
1727 | ||
e9179ce1 AK |
1728 | static void io_mem_init(void) |
1729 | { | |
bf8d5166 | 1730 | memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX); |
0e0df1e2 AK |
1731 | memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL, |
1732 | "unassigned", UINT64_MAX); | |
1733 | memory_region_init_io(&io_mem_notdirty, ¬dirty_mem_ops, NULL, | |
1734 | "notdirty", UINT64_MAX); | |
de712f94 AK |
1735 | memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL, |
1736 | "subpage-ram", UINT64_MAX); | |
1ec9b909 AK |
1737 | memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL, |
1738 | "watch", UINT64_MAX); | |
e9179ce1 AK |
1739 | } |
1740 | ||
ac1970fb AK |
1741 | static void mem_begin(MemoryListener *listener) |
1742 | { | |
1743 | AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener); | |
1744 | ||
1745 | destroy_all_mappings(d); | |
1746 | d->phys_map.ptr = PHYS_MAP_NODE_NIL; | |
1747 | } | |
1748 | ||
50c1e149 AK |
1749 | static void core_begin(MemoryListener *listener) |
1750 | { | |
5312bd8b AK |
1751 | phys_sections_clear(); |
1752 | phys_section_unassigned = dummy_section(&io_mem_unassigned); | |
aa102231 AK |
1753 | phys_section_notdirty = dummy_section(&io_mem_notdirty); |
1754 | phys_section_rom = dummy_section(&io_mem_rom); | |
1755 | phys_section_watch = dummy_section(&io_mem_watch); | |
50c1e149 AK |
1756 | } |
1757 | ||
1d71148e | 1758 | static void tcg_commit(MemoryListener *listener) |
50c1e149 | 1759 | { |
9349b4f9 | 1760 | CPUArchState *env; |
117712c3 AK |
1761 | |
1762 | /* since each CPU stores ram addresses in its TLB cache, we must | |
1763 | reset the modified entries */ | |
1764 | /* XXX: slow ! */ | |
1765 | for(env = first_cpu; env != NULL; env = env->next_cpu) { | |
1766 | tlb_flush(env, 1); | |
1767 | } | |
50c1e149 AK |
1768 | } |
1769 | ||
93632747 AK |
1770 | static void core_log_global_start(MemoryListener *listener) |
1771 | { | |
1772 | cpu_physical_memory_set_dirty_tracking(1); | |
1773 | } | |
1774 | ||
1775 | static void core_log_global_stop(MemoryListener *listener) | |
1776 | { | |
1777 | cpu_physical_memory_set_dirty_tracking(0); | |
1778 | } | |
1779 | ||
4855d41a AK |
1780 | static void io_region_add(MemoryListener *listener, |
1781 | MemoryRegionSection *section) | |
1782 | { | |
a2d33521 AK |
1783 | MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1); |
1784 | ||
1785 | mrio->mr = section->mr; | |
1786 | mrio->offset = section->offset_within_region; | |
1787 | iorange_init(&mrio->iorange, &memory_region_iorange_ops, | |
4855d41a | 1788 | section->offset_within_address_space, section->size); |
a2d33521 | 1789 | ioport_register(&mrio->iorange); |
4855d41a AK |
1790 | } |
1791 | ||
1792 | static void io_region_del(MemoryListener *listener, | |
1793 | MemoryRegionSection *section) | |
1794 | { | |
1795 | isa_unassign_ioport(section->offset_within_address_space, section->size); | |
1796 | } | |
1797 | ||
93632747 | 1798 | static MemoryListener core_memory_listener = { |
50c1e149 | 1799 | .begin = core_begin, |
93632747 AK |
1800 | .log_global_start = core_log_global_start, |
1801 | .log_global_stop = core_log_global_stop, | |
ac1970fb | 1802 | .priority = 1, |
93632747 AK |
1803 | }; |
1804 | ||
4855d41a AK |
1805 | static MemoryListener io_memory_listener = { |
1806 | .region_add = io_region_add, | |
1807 | .region_del = io_region_del, | |
4855d41a AK |
1808 | .priority = 0, |
1809 | }; | |
1810 | ||
1d71148e AK |
1811 | static MemoryListener tcg_memory_listener = { |
1812 | .commit = tcg_commit, | |
1813 | }; | |
1814 | ||
ac1970fb AK |
1815 | void address_space_init_dispatch(AddressSpace *as) |
1816 | { | |
1817 | AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1); | |
1818 | ||
1819 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 }; | |
1820 | d->listener = (MemoryListener) { | |
1821 | .begin = mem_begin, | |
1822 | .region_add = mem_add, | |
1823 | .region_nop = mem_add, | |
1824 | .priority = 0, | |
1825 | }; | |
1826 | as->dispatch = d; | |
1827 | memory_listener_register(&d->listener, as); | |
1828 | } | |
1829 | ||
83f3c251 AK |
1830 | void address_space_destroy_dispatch(AddressSpace *as) |
1831 | { | |
1832 | AddressSpaceDispatch *d = as->dispatch; | |
1833 | ||
1834 | memory_listener_unregister(&d->listener); | |
1835 | destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1); | |
1836 | g_free(d); | |
1837 | as->dispatch = NULL; | |
1838 | } | |
1839 | ||
62152b8a AK |
1840 | static void memory_map_init(void) |
1841 | { | |
7267c094 | 1842 | system_memory = g_malloc(sizeof(*system_memory)); |
8417cebf | 1843 | memory_region_init(system_memory, "system", INT64_MAX); |
2673a5da AK |
1844 | address_space_init(&address_space_memory, system_memory); |
1845 | address_space_memory.name = "memory"; | |
309cb471 | 1846 | |
7267c094 | 1847 | system_io = g_malloc(sizeof(*system_io)); |
309cb471 | 1848 | memory_region_init(system_io, "io", 65536); |
2673a5da AK |
1849 | address_space_init(&address_space_io, system_io); |
1850 | address_space_io.name = "I/O"; | |
93632747 | 1851 | |
f6790af6 AK |
1852 | memory_listener_register(&core_memory_listener, &address_space_memory); |
1853 | memory_listener_register(&io_memory_listener, &address_space_io); | |
1854 | memory_listener_register(&tcg_memory_listener, &address_space_memory); | |
9e11908f PM |
1855 | |
1856 | dma_context_init(&dma_context_memory, &address_space_memory, | |
1857 | NULL, NULL, NULL); | |
62152b8a AK |
1858 | } |
1859 | ||
1860 | MemoryRegion *get_system_memory(void) | |
1861 | { | |
1862 | return system_memory; | |
1863 | } | |
1864 | ||
309cb471 AK |
1865 | MemoryRegion *get_system_io(void) |
1866 | { | |
1867 | return system_io; | |
1868 | } | |
1869 | ||
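A hedged sketch of how board code typically hangs RAM off the tree that memory_map_init() builds (size, name, and offset are illustrative; the memory API calls are assumed from elsewhere in the tree):

    /* Illustrative only: map 128 MB of RAM at guest physical address 0. */
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    memory_region_init_ram(ram, "board.ram", 128 * 1024 * 1024);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(get_system_memory(), 0, ram);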
e2eef170 PB |
1870 | #endif /* !defined(CONFIG_USER_ONLY) */ |
1871 | ||
13eb76e0 FB |
1872 | /* physical memory access (slow version, mainly for debug) */ |
1873 | #if defined(CONFIG_USER_ONLY) | |
9349b4f9 | 1874 | int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, |
a68fe89c | 1875 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
1876 | { |
1877 | int l, flags; | |
1878 | target_ulong page; | |
53a5960a | 1879 | void * p; |
13eb76e0 FB |
1880 | |
1881 | while (len > 0) { | |
1882 | page = addr & TARGET_PAGE_MASK; | |
1883 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1884 | if (l > len) | |
1885 | l = len; | |
1886 | flags = page_get_flags(page); | |
1887 | if (!(flags & PAGE_VALID)) | |
a68fe89c | 1888 | return -1; |
13eb76e0 FB |
1889 | if (is_write) { |
1890 | if (!(flags & PAGE_WRITE)) | |
a68fe89c | 1891 | return -1; |
579a97f7 | 1892 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 1893 | if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) |
a68fe89c | 1894 | return -1; |
72fb7daa AJ |
1895 | memcpy(p, buf, l); |
1896 | unlock_user(p, addr, l); | |
13eb76e0 FB |
1897 | } else { |
1898 | if (!(flags & PAGE_READ)) | |
a68fe89c | 1899 | return -1; |
579a97f7 | 1900 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 1901 | if (!(p = lock_user(VERIFY_READ, addr, l, 1))) |
a68fe89c | 1902 | return -1; |
72fb7daa | 1903 | memcpy(buf, p, l); |
5b257578 | 1904 | unlock_user(p, addr, 0); |
13eb76e0 FB |
1905 | } |
1906 | len -= l; | |
1907 | buf += l; | |
1908 | addr += l; | |
1909 | } | |
a68fe89c | 1910 | return 0; |
13eb76e0 | 1911 | } |
8df1cd07 | 1912 | |
13eb76e0 | 1913 | #else |
51d7a9eb | 1914 | |
a8170e5e AK |
1915 | static void invalidate_and_set_dirty(hwaddr addr, |
1916 | hwaddr length) | |
51d7a9eb AP |
1917 | { |
1918 | if (!cpu_physical_memory_is_dirty(addr)) { | |
1919 | /* invalidate code */ | |
1920 | tb_invalidate_phys_page_range(addr, addr + length, 0); | |
1921 | /* set dirty bit */ | |
1922 | cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG)); | |
1923 | } | |
e226939d | 1924 | xen_modified_memory(addr, length); |
51d7a9eb AP |
1925 | } |
1926 | ||
2bbfa05d PB |
1927 | static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) |
1928 | { | |
1929 | if (memory_region_is_ram(mr)) { | |
1930 | return !(is_write && mr->readonly); | |
1931 | } | |
1932 | if (memory_region_is_romd(mr)) { | |
1933 | return !is_write; | |
1934 | } | |
1935 | ||
1936 | return false; | |
1937 | } | |
1938 | ||
f52cc467 | 1939 | static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr) |
82f2563f | 1940 | { |
f52cc467 | 1941 | if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) { |
82f2563f PB |
1942 | return 4; |
1943 | } | |
f52cc467 | 1944 | if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) { |
82f2563f PB |
1945 | return 2; |
1946 | } | |
1947 | return 1; | |
1948 | } | |
1949 | ||
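A worked example of the size selection above (addresses invented, impl.unaligned clear): address_space_rw() below recomputes the access size on every iteration, so a 7-byte MMIO transfer starting at 0x1002 is issued as three accesses.

    /* 7 bytes at 0x1002 -> 2 bytes @ 0x1002, 4 bytes @ 0x1004, 1 byte @ 0x1008 */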
fd8aaa76 | 1950 | bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, |
ac1970fb | 1951 | int len, bool is_write) |
13eb76e0 | 1952 | { |
149f54b5 | 1953 | hwaddr l; |
13eb76e0 | 1954 | uint8_t *ptr; |
791af8c8 | 1955 | uint64_t val; |
149f54b5 | 1956 | hwaddr addr1; |
f3705d53 | 1957 | MemoryRegionSection *section; |
fd8aaa76 | 1958 | bool error = false; |
3b46e624 | 1959 | |
13eb76e0 | 1960 | while (len > 0) { |
149f54b5 PB |
1961 | l = len; |
1962 | section = address_space_translate(as, addr, &addr1, &l, is_write); | |
3b46e624 | 1963 | |
13eb76e0 | 1964 | if (is_write) { |
2bbfa05d | 1965 | if (!memory_access_is_direct(section->mr, is_write)) { |
f52cc467 | 1966 | l = memory_access_size(section->mr, l, addr1); |
6a00d601 FB |
1967 | /* XXX: could force cpu_single_env to NULL to avoid |
1968 | potential bugs */ | |
82f2563f | 1969 | if (l == 4) { |
1c213d19 | 1970 | /* 32 bit write access */ |
c27004ec | 1971 | val = ldl_p(buf); |
fd8aaa76 | 1972 | error |= io_mem_write(section->mr, addr1, val, 4); |
82f2563f | 1973 | } else if (l == 2) { |
1c213d19 | 1974 | /* 16 bit write access */ |
c27004ec | 1975 | val = lduw_p(buf); |
fd8aaa76 | 1976 | error |= io_mem_write(section->mr, addr1, val, 2); |
13eb76e0 | 1977 | } else { |
1c213d19 | 1978 | /* 8 bit write access */ |
c27004ec | 1979 | val = ldub_p(buf); |
fd8aaa76 | 1980 | error |= io_mem_write(section->mr, addr1, val, 1); |
13eb76e0 | 1981 | } |
2bbfa05d | 1982 | } else { |
149f54b5 | 1983 | addr1 += memory_region_get_ram_addr(section->mr); |
13eb76e0 | 1984 | /* RAM case */ |
5579c7f3 | 1985 | ptr = qemu_get_ram_ptr(addr1); |
13eb76e0 | 1986 | memcpy(ptr, buf, l); |
51d7a9eb | 1987 | invalidate_and_set_dirty(addr1, l); |
13eb76e0 FB |
1988 | } |
1989 | } else { | |
2bbfa05d | 1990 | if (!memory_access_is_direct(section->mr, is_write)) { |
13eb76e0 | 1991 | /* I/O case */ |
f52cc467 | 1992 | l = memory_access_size(section->mr, l, addr1); |
82f2563f | 1993 | if (l == 4) { |
13eb76e0 | 1994 | /* 32 bit read access */ |
fd8aaa76 | 1995 | error |= io_mem_read(section->mr, addr1, &val, 4); |
c27004ec | 1996 | stl_p(buf, val); |
82f2563f | 1997 | } else if (l == 2) { |
13eb76e0 | 1998 | /* 16 bit read access */ |
fd8aaa76 | 1999 | error |= io_mem_read(section->mr, addr1, &val, 2); |
c27004ec | 2000 | stw_p(buf, val); |
13eb76e0 | 2001 | } else { |
1c213d19 | 2002 | /* 8 bit read access */ |
fd8aaa76 | 2003 | error |= io_mem_read(section->mr, addr1, &val, 1); |
c27004ec | 2004 | stb_p(buf, val); |
13eb76e0 FB |
2005 | } |
2006 | } else { | |
2007 | /* RAM case */ | |
149f54b5 | 2008 | ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1); |
f3705d53 | 2009 | memcpy(buf, ptr, l); |
13eb76e0 FB |
2010 | } |
2011 | } | |
2012 | len -= l; | |
2013 | buf += l; | |
2014 | addr += l; | |
2015 | } | |
fd8aaa76 PB |
2016 | |
2017 | return error; | |
13eb76e0 | 2018 | } |
8df1cd07 | 2019 | |
fd8aaa76 | 2020 | bool address_space_write(AddressSpace *as, hwaddr addr, |
ac1970fb AK |
2021 | const uint8_t *buf, int len) |
2022 | { | |
fd8aaa76 | 2023 | return address_space_rw(as, addr, (uint8_t *)buf, len, true); |
ac1970fb AK |
2024 | } |
2025 | ||
fd8aaa76 | 2026 | bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) |
ac1970fb | 2027 | { |
fd8aaa76 | 2028 | return address_space_rw(as, addr, buf, len, false); |
ac1970fb AK |
2029 | } |
2030 | ||
2031 | ||
a8170e5e | 2032 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
ac1970fb AK |
2033 | int len, int is_write) |
2034 | { | |
fd8aaa76 | 2035 | address_space_rw(&address_space_memory, addr, buf, len, is_write); |
ac1970fb AK |
2036 | } |
2037 | ||
d0ecd2aa | 2038 | /* used for ROM loading : can write in RAM and ROM */ |
a8170e5e | 2039 | void cpu_physical_memory_write_rom(hwaddr addr, |
d0ecd2aa FB |
2040 | const uint8_t *buf, int len) |
2041 | { | |
149f54b5 | 2042 | hwaddr l; |
d0ecd2aa | 2043 | uint8_t *ptr; |
149f54b5 | 2044 | hwaddr addr1; |
f3705d53 | 2045 | MemoryRegionSection *section; |
3b46e624 | 2046 | |
d0ecd2aa | 2047 | while (len > 0) { |
149f54b5 PB |
2048 | l = len; |
2049 | section = address_space_translate(&address_space_memory, | |
2050 | addr, &addr1, &l, true); | |
3b46e624 | 2051 | |
cc5bea60 BS |
2052 | if (!(memory_region_is_ram(section->mr) || |
2053 | memory_region_is_romd(section->mr))) { | |
d0ecd2aa FB |
2054 | /* do nothing */ |
2055 | } else { | |
149f54b5 | 2056 | addr1 += memory_region_get_ram_addr(section->mr); |
d0ecd2aa | 2057 | /* ROM/RAM case */ |
5579c7f3 | 2058 | ptr = qemu_get_ram_ptr(addr1); |
d0ecd2aa | 2059 | memcpy(ptr, buf, l); |
51d7a9eb | 2060 | invalidate_and_set_dirty(addr1, l); |
d0ecd2aa FB |
2061 | } |
2062 | len -= l; | |
2063 | buf += l; | |
2064 | addr += l; | |
2065 | } | |
2066 | } | |
2067 | ||
6d16c2f8 AL |
2068 | typedef struct { |
2069 | void *buffer; | |
a8170e5e AK |
2070 | hwaddr addr; |
2071 | hwaddr len; | |
6d16c2f8 AL |
2072 | } BounceBuffer; |
2073 | ||
2074 | static BounceBuffer bounce; | |
2075 | ||
ba223c29 AL |
2076 | typedef struct MapClient { |
2077 | void *opaque; | |
2078 | void (*callback)(void *opaque); | |
72cf2d4f | 2079 | QLIST_ENTRY(MapClient) link; |
ba223c29 AL |
2080 | } MapClient; |
2081 | ||
72cf2d4f BS |
2082 | static QLIST_HEAD(map_client_list, MapClient) map_client_list |
2083 | = QLIST_HEAD_INITIALIZER(map_client_list); | |
ba223c29 AL |
2084 | |
2085 | void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) | |
2086 | { | |
7267c094 | 2087 | MapClient *client = g_malloc(sizeof(*client)); |
ba223c29 AL |
2088 | |
2089 | client->opaque = opaque; | |
2090 | client->callback = callback; | |
72cf2d4f | 2091 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
ba223c29 AL |
2092 | return client; |
2093 | } | |
2094 | ||
8b9c99d9 | 2095 | static void cpu_unregister_map_client(void *_client) |
ba223c29 AL |
2096 | { |
2097 | MapClient *client = (MapClient *)_client; | |
2098 | ||
72cf2d4f | 2099 | QLIST_REMOVE(client, link); |
7267c094 | 2100 | g_free(client); |
ba223c29 AL |
2101 | } |
2102 | ||
2103 | static void cpu_notify_map_clients(void) | |
2104 | { | |
2105 | MapClient *client; | |
2106 | ||
72cf2d4f BS |
2107 | while (!QLIST_EMPTY(&map_client_list)) { |
2108 | client = QLIST_FIRST(&map_client_list); | |
ba223c29 | 2109 | client->callback(client->opaque); |
34d5e948 | 2110 | cpu_unregister_map_client(client); |
ba223c29 AL |
2111 | } |
2112 | } | |
2113 | ||
51644ab7 PB |
2114 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) |
2115 | { | |
2116 | MemoryRegionSection *section; | |
2117 | hwaddr l, xlat; | |
2118 | ||
2119 | while (len > 0) { | |
2120 | l = len; | |
2121 | section = address_space_translate(as, addr, &xlat, &l, is_write); | |
2122 | if (!memory_access_is_direct(section->mr, is_write)) { | |
f52cc467 | 2123 | l = memory_access_size(section->mr, l, addr); |
51644ab7 PB |
2124 | if (!memory_region_access_valid(section->mr, xlat, l, is_write)) { |
2125 | return false; | |
2126 | } | |
2127 | } | |
2128 | ||
2129 | len -= l; | |
2130 | addr += l; | |
2131 | } | |
2132 | return true; | |
2133 | } | |
2134 | ||
6d16c2f8 AL |
2135 | /* Map a physical memory region into a host virtual address. |
2136 | * May map a subset of the requested range, given by and returned in *plen. | |
2137 | * May return NULL if resources needed to perform the mapping are exhausted. | |
2138 | * Use only for reads OR writes - not for read-modify-write operations. | |
ba223c29 AL |
2139 | * Use cpu_register_map_client() to know when retrying the map operation is |
2140 | * likely to succeed. | |
6d16c2f8 | 2141 | */ |
ac1970fb | 2142 | void *address_space_map(AddressSpace *as, |
a8170e5e AK |
2143 | hwaddr addr, |
2144 | hwaddr *plen, | |
ac1970fb | 2145 | bool is_write) |
6d16c2f8 | 2146 | { |
a8170e5e AK |
2147 | hwaddr len = *plen; |
2148 | hwaddr todo = 0; | |
149f54b5 | 2149 | hwaddr l, xlat; |
f3705d53 | 2150 | MemoryRegionSection *section; |
f15fbc4b | 2151 | ram_addr_t raddr = RAM_ADDR_MAX; |
8ab934f9 SS |
2152 | ram_addr_t rlen; |
2153 | void *ret; | |
6d16c2f8 AL |
2154 | |
2155 | while (len > 0) { | |
149f54b5 PB |
2156 | l = len; |
2157 | section = address_space_translate(as, addr, &xlat, &l, is_write); | |
6d16c2f8 | 2158 | |
2bbfa05d | 2159 | if (!memory_access_is_direct(section->mr, is_write)) { |
38bee5dc | 2160 | if (todo || bounce.buffer) { |
6d16c2f8 AL |
2161 | break; |
2162 | } | |
2163 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); | |
2164 | bounce.addr = addr; | |
2165 | bounce.len = l; | |
2166 | if (!is_write) { | |
ac1970fb | 2167 | address_space_read(as, addr, bounce.buffer, l); |
6d16c2f8 | 2168 | } |
38bee5dc SS |
2169 | |
2170 | *plen = l; | |
2171 | return bounce.buffer; | |
6d16c2f8 | 2172 | } |
8ab934f9 | 2173 | if (!todo) { |
149f54b5 PB |
2174 | raddr = memory_region_get_ram_addr(section->mr) + xlat; |
2175 | } else { | |
2176 | if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) { | |
2177 | break; | |
2178 | } | |
8ab934f9 | 2179 | } |
6d16c2f8 AL |
2180 | |
2181 | len -= l; | |
2182 | addr += l; | |
38bee5dc | 2183 | todo += l; |
6d16c2f8 | 2184 | } |
8ab934f9 SS |
2185 | rlen = todo; |
2186 | ret = qemu_ram_ptr_length(raddr, &rlen); | |
2187 | *plen = rlen; | |
2188 | return ret; | |
6d16c2f8 AL |
2189 | } |
2190 | ||
ac1970fb | 2191 | /* Unmaps a memory region previously mapped by address_space_map(). |
6d16c2f8 AL |
2192 | * Will also mark the memory as dirty if is_write == 1. access_len gives |
2193 | * the amount of memory that was actually read or written by the caller. | |
2194 | */ | |
a8170e5e AK |
2195 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
2196 | int is_write, hwaddr access_len) | |
6d16c2f8 AL |
2197 | { |
2198 | if (buffer != bounce.buffer) { | |
2199 | if (is_write) { | |
e890261f | 2200 | ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer); |
6d16c2f8 AL |
2201 | while (access_len) { |
2202 | unsigned l; | |
2203 | l = TARGET_PAGE_SIZE; | |
2204 | if (l > access_len) | |
2205 | l = access_len; | |
51d7a9eb | 2206 | invalidate_and_set_dirty(addr1, l); |
6d16c2f8 AL |
2207 | addr1 += l; |
2208 | access_len -= l; | |
2209 | } | |
2210 | } | |
868bb33f | 2211 | if (xen_enabled()) { |
e41d7c69 | 2212 | xen_invalidate_map_cache_entry(buffer); |
050a0ddf | 2213 | } |
6d16c2f8 AL |
2214 | return; |
2215 | } | |
2216 | if (is_write) { | |
ac1970fb | 2217 | address_space_write(as, bounce.addr, bounce.buffer, access_len); |
6d16c2f8 | 2218 | } |
f8a83245 | 2219 | qemu_vfree(bounce.buffer); |
6d16c2f8 | 2220 | bounce.buffer = NULL; |
ba223c29 | 2221 | cpu_notify_map_clients(); |
6d16c2f8 | 2222 | } |
d0ecd2aa | 2223 | |
a8170e5e AK |
2224 | void *cpu_physical_memory_map(hwaddr addr, |
2225 | hwaddr *plen, | |
ac1970fb AK |
2226 | int is_write) |
2227 | { | |
2228 | return address_space_map(&address_space_memory, addr, plen, is_write); | |
2229 | } | |
2230 | ||
a8170e5e AK |
2231 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
2232 | int is_write, hwaddr access_len) | |
ac1970fb AK |
2233 | { |
2234 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); | |
2235 | } | |
2236 | ||
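A hedged end-to-end sketch of the map/unmap API above (the function, address, and length are illustrative): map for write, honor the possibly shortened *plen, unmap with the actual access length, and loop for the remainder.

    /* Illustrative only: zero a guest-physical range via a host mapping. */
    static void zero_guest_range(AddressSpace *as, hwaddr addr, hwaddr len)
    {
        while (len > 0) {
            hwaddr plen = len;
            void *p = address_space_map(as, addr, &plen, true);
            if (!p) {
                /* mapping resources exhausted; a caller could register a
                 * map client and retry from its callback */
                break;
            }
            memset(p, 0, plen);
            address_space_unmap(as, p, plen, true, plen);
            addr += plen;
            len -= plen;
        }
    }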
8df1cd07 | 2237 | /* warning: addr must be aligned */ |
a8170e5e | 2238 | static inline uint32_t ldl_phys_internal(hwaddr addr, |
1e78bcc1 | 2239 | enum device_endian endian) |
8df1cd07 | 2240 | { |
8df1cd07 | 2241 | uint8_t *ptr; |
791af8c8 | 2242 | uint64_t val; |
f3705d53 | 2243 | MemoryRegionSection *section; |
149f54b5 PB |
2244 | hwaddr l = 4; |
2245 | hwaddr addr1; | |
8df1cd07 | 2246 | |
149f54b5 PB |
2247 | section = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2248 | false); | |
2bbfa05d | 2249 | if (l < 4 || !memory_access_is_direct(section->mr, false)) { |
8df1cd07 | 2250 | /* I/O case */ |
791af8c8 | 2251 | io_mem_read(section->mr, addr1, &val, 4); |
1e78bcc1 AG |
2252 | #if defined(TARGET_WORDS_BIGENDIAN) |
2253 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2254 | val = bswap32(val); | |
2255 | } | |
2256 | #else | |
2257 | if (endian == DEVICE_BIG_ENDIAN) { | |
2258 | val = bswap32(val); | |
2259 | } | |
2260 | #endif | |
8df1cd07 FB |
2261 | } else { |
2262 | /* RAM case */ | |
f3705d53 | 2263 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
06ef3525 | 2264 | & TARGET_PAGE_MASK) |
149f54b5 | 2265 | + addr1); |
1e78bcc1 AG |
2266 | switch (endian) { |
2267 | case DEVICE_LITTLE_ENDIAN: | |
2268 | val = ldl_le_p(ptr); | |
2269 | break; | |
2270 | case DEVICE_BIG_ENDIAN: | |
2271 | val = ldl_be_p(ptr); | |
2272 | break; | |
2273 | default: | |
2274 | val = ldl_p(ptr); | |
2275 | break; | |
2276 | } | |
8df1cd07 FB |
2277 | } |
2278 | return val; | |
2279 | } | |
2280 | ||
a8170e5e | 2281 | uint32_t ldl_phys(hwaddr addr) |
1e78bcc1 AG |
2282 | { |
2283 | return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2284 | } | |
2285 | ||
a8170e5e | 2286 | uint32_t ldl_le_phys(hwaddr addr) |
1e78bcc1 AG |
2287 | { |
2288 | return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2289 | } | |
2290 | ||
a8170e5e | 2291 | uint32_t ldl_be_phys(hwaddr addr) |
1e78bcc1 AG |
2292 | { |
2293 | return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2294 | } | |
2295 | ||
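The three wrappers differ only in byte order. A hedged illustration (the device base address is invented): reading a 64-bit counter that a device documents as little-endian, independent of TARGET_WORDS_BIGENDIAN.

    /* Illustrative only: 'dev_base' is a made-up MMIO base address. */
    uint32_t lo = ldl_le_phys(dev_base + 0x0);
    uint32_t hi = ldl_le_phys(dev_base + 0x4);
    uint64_t counter = ((uint64_t)hi << 32) | lo;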
84b7b8e7 | 2296 | /* warning: addr must be aligned */ |
a8170e5e | 2297 | static inline uint64_t ldq_phys_internal(hwaddr addr, |
1e78bcc1 | 2298 | enum device_endian endian) |
84b7b8e7 | 2299 | { |
84b7b8e7 FB |
2300 | uint8_t *ptr; |
2301 | uint64_t val; | |
f3705d53 | 2302 | MemoryRegionSection *section; |
149f54b5 PB |
2303 | hwaddr l = 8; |
2304 | hwaddr addr1; | |
84b7b8e7 | 2305 | |
149f54b5 PB |
2306 | section = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2307 | false); | |
2bbfa05d | 2308 | if (l < 8 || !memory_access_is_direct(section->mr, false)) { |
84b7b8e7 | 2309 | /* I/O case */ |
791af8c8 | 2310 | io_mem_read(section->mr, addr1, &val, 8); |
968a5627 PB |
2311 | #if defined(TARGET_WORDS_BIGENDIAN) |
2312 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2313 | val = bswap64(val); | |
2314 | } | |
2315 | #else | |
2316 | if (endian == DEVICE_BIG_ENDIAN) { | |
2317 | val = bswap64(val); | |
2318 | } | |
84b7b8e7 FB |
2319 | #endif |
2320 | } else { | |
2321 | /* RAM case */ | |
f3705d53 | 2322 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
06ef3525 | 2323 | & TARGET_PAGE_MASK) |
149f54b5 | 2324 | + addr1); |
1e78bcc1 AG |
2325 | switch (endian) { |
2326 | case DEVICE_LITTLE_ENDIAN: | |
2327 | val = ldq_le_p(ptr); | |
2328 | break; | |
2329 | case DEVICE_BIG_ENDIAN: | |
2330 | val = ldq_be_p(ptr); | |
2331 | break; | |
2332 | default: | |
2333 | val = ldq_p(ptr); | |
2334 | break; | |
2335 | } | |
84b7b8e7 FB |
2336 | } |
2337 | return val; | |
2338 | } | |
2339 | ||
a8170e5e | 2340 | uint64_t ldq_phys(hwaddr addr) |
1e78bcc1 AG |
2341 | { |
2342 | return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2343 | } | |
2344 | ||
a8170e5e | 2345 | uint64_t ldq_le_phys(hwaddr addr) |
1e78bcc1 AG |
2346 | { |
2347 | return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2348 | } | |
2349 | ||
a8170e5e | 2350 | uint64_t ldq_be_phys(hwaddr addr) |
1e78bcc1 AG |
2351 | { |
2352 | return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2353 | } | |
2354 | ||
aab33094 | 2355 | /* XXX: optimize */ |
a8170e5e | 2356 | uint32_t ldub_phys(hwaddr addr) |
aab33094 FB |
2357 | { |
2358 | uint8_t val; | |
2359 | cpu_physical_memory_read(addr, &val, 1); | |
2360 | return val; | |
2361 | } | |
2362 | ||
733f0b02 | 2363 | /* warning: addr must be aligned */ |
a8170e5e | 2364 | static inline uint32_t lduw_phys_internal(hwaddr addr, |
1e78bcc1 | 2365 | enum device_endian endian) |
aab33094 | 2366 | { |
733f0b02 MT |
2367 | uint8_t *ptr; |
2368 | uint64_t val; | |
f3705d53 | 2369 | MemoryRegionSection *section; |
149f54b5 PB |
2370 | hwaddr l = 2; |
2371 | hwaddr addr1; | |
733f0b02 | 2372 | |
149f54b5 PB |
2373 | section = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2374 | false); | |
2bbfa05d | 2375 | if (l < 2 || !memory_access_is_direct(section->mr, false)) { |
733f0b02 | 2376 | /* I/O case */ |
791af8c8 | 2377 | io_mem_read(section->mr, addr1, &val, 2); |
1e78bcc1 AG |
2378 | #if defined(TARGET_WORDS_BIGENDIAN) |
2379 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2380 | val = bswap16(val); | |
2381 | } | |
2382 | #else | |
2383 | if (endian == DEVICE_BIG_ENDIAN) { | |
2384 | val = bswap16(val); | |
2385 | } | |
2386 | #endif | |
733f0b02 MT |
2387 | } else { |
2388 | /* RAM case */ | |
f3705d53 | 2389 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) |
06ef3525 | 2390 | & TARGET_PAGE_MASK) |
149f54b5 | 2391 | + addr1); |
1e78bcc1 AG |
2392 | switch (endian) { |
2393 | case DEVICE_LITTLE_ENDIAN: | |
2394 | val = lduw_le_p(ptr); | |
2395 | break; | |
2396 | case DEVICE_BIG_ENDIAN: | |
2397 | val = lduw_be_p(ptr); | |
2398 | break; | |
2399 | default: | |
2400 | val = lduw_p(ptr); | |
2401 | break; | |
2402 | } | |
733f0b02 MT |
2403 | } |
2404 | return val; | |
aab33094 FB |
2405 | } |
2406 | ||
a8170e5e | 2407 | uint32_t lduw_phys(hwaddr addr) |
1e78bcc1 AG |
2408 | { |
2409 | return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2410 | } | |
2411 | ||
a8170e5e | 2412 | uint32_t lduw_le_phys(hwaddr addr) |
1e78bcc1 AG |
2413 | { |
2414 | return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2415 | } | |
2416 | ||
a8170e5e | 2417 | uint32_t lduw_be_phys(hwaddr addr) |
1e78bcc1 AG |
2418 | { |
2419 | return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2420 | } | |
2421 | ||
8df1cd07 FB |
2422 | /* warning: addr must be aligned. The ram page is not masked as dirty |
2423 | and the code inside is not invalidated. It is useful if the dirty | |
2424 | bits are used to track modified PTEs */ | |
a8170e5e | 2425 | void stl_phys_notdirty(hwaddr addr, uint32_t val) |
8df1cd07 | 2426 | { |
8df1cd07 | 2427 | uint8_t *ptr; |
f3705d53 | 2428 | MemoryRegionSection *section; |
149f54b5 PB |
2429 | hwaddr l = 4; |
2430 | hwaddr addr1; | |
8df1cd07 | 2431 | |
149f54b5 PB |
2432 | section = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2433 | true); | |
2bbfa05d | 2434 | if (l < 4 || !memory_access_is_direct(section->mr, true)) { |
149f54b5 | 2435 | io_mem_write(section->mr, addr1, val, 4); |
8df1cd07 | 2436 | } else { |
149f54b5 | 2437 | addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK; |
5579c7f3 | 2438 | ptr = qemu_get_ram_ptr(addr1); |
8df1cd07 | 2439 | stl_p(ptr, val); |
74576198 AL |
2440 | |
2441 | if (unlikely(in_migration)) { | |
2442 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2443 | /* invalidate code */ | |
2444 | tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | |
2445 | /* set dirty bit */ | |
f7c11b53 YT |
2446 | cpu_physical_memory_set_dirty_flags( |
2447 | addr1, (0xff & ~CODE_DIRTY_FLAG)); | |
74576198 AL |
2448 | } |
2449 | } | |
8df1cd07 FB |
2450 | } |
2451 | } | |
2452 | ||
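A hedged sketch of the PTE-tracking use named in the comment above (the PTE address and the flag value are illustrative, loosely modeled on an x86-style accessed bit): the guest page-table word is updated without invalidating translated code on its page.

    /* Illustrative only: set an 'accessed' bit in a guest PTE. */
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* accessed, illustrative */);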
2453 | /* warning: addr must be aligned */ | |
a8170e5e | 2454 | static inline void stl_phys_internal(hwaddr addr, uint32_t val, |
1e78bcc1 | 2455 | enum device_endian endian) |
8df1cd07 | 2456 | { |
8df1cd07 | 2457 | uint8_t *ptr; |
f3705d53 | 2458 | MemoryRegionSection *section; |
149f54b5 PB |
2459 | hwaddr l = 4; |
2460 | hwaddr addr1; | |
8df1cd07 | 2461 | |
149f54b5 PB |
2462 | section = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2463 | true); | |
2bbfa05d | 2464 | if (l < 4 || !memory_access_is_direct(section->mr, true)) { |
1e78bcc1 AG |
2465 | #if defined(TARGET_WORDS_BIGENDIAN) |
2466 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2467 | val = bswap32(val); | |
2468 | } | |
2469 | #else | |
2470 | if (endian == DEVICE_BIG_ENDIAN) { | |
2471 | val = bswap32(val); | |
2472 | } | |
2473 | #endif | |
149f54b5 | 2474 | io_mem_write(section->mr, addr1, val, 4); |
8df1cd07 | 2475 | } else { |
8df1cd07 | 2476 | /* RAM case */ |
149f54b5 | 2477 | addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK; |
5579c7f3 | 2478 | ptr = qemu_get_ram_ptr(addr1); |
1e78bcc1 AG |
2479 | switch (endian) { |
2480 | case DEVICE_LITTLE_ENDIAN: | |
2481 | stl_le_p(ptr, val); | |
2482 | break; | |
2483 | case DEVICE_BIG_ENDIAN: | |
2484 | stl_be_p(ptr, val); | |
2485 | break; | |
2486 | default: | |
2487 | stl_p(ptr, val); | |
2488 | break; | |
2489 | } | |
51d7a9eb | 2490 | invalidate_and_set_dirty(addr1, 4); |
8df1cd07 FB |
2491 | } |
2492 | } | |
2493 | ||
a8170e5e | 2494 | void stl_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2495 | { |
2496 | stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); | |
2497 | } | |
2498 | ||
a8170e5e | 2499 | void stl_le_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2500 | { |
2501 | stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); | |
2502 | } | |
2503 | ||
a8170e5e | 2504 | void stl_be_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2505 | { |
2506 | stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN); | |
2507 | } | |
2508 | ||
aab33094 | 2509 | /* XXX: optimize */ |
a8170e5e | 2510 | void stb_phys(hwaddr addr, uint32_t val) |
aab33094 FB |
2511 | { |
2512 | uint8_t v = val; | |
2513 | cpu_physical_memory_write(addr, &v, 1); | |
2514 | } | |
2515 | ||
733f0b02 | 2516 | /* warning: addr must be aligned */ |
a8170e5e | 2517 | static inline void stw_phys_internal(hwaddr addr, uint32_t val, |
1e78bcc1 | 2518 | enum device_endian endian) |
aab33094 | 2519 | { |
733f0b02 | 2520 | uint8_t *ptr; |
f3705d53 | 2521 | MemoryRegionSection *section; |
149f54b5 PB |
2522 | hwaddr l = 2; |
2523 | hwaddr addr1; | |
733f0b02 | 2524 | |
149f54b5 PB |
2525 | section = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2526 | true); | |
2bbfa05d | 2527 | if (l < 2 || !memory_access_is_direct(section->mr, true)) { |
1e78bcc1 AG |
2528 | #if defined(TARGET_WORDS_BIGENDIAN) |
2529 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2530 | val = bswap16(val); | |
2531 | } | |
2532 | #else | |
2533 | if (endian == DEVICE_BIG_ENDIAN) { | |
2534 | val = bswap16(val); | |
2535 | } | |
2536 | #endif | |
149f54b5 | 2537 | io_mem_write(section->mr, addr1, val, 2); |
733f0b02 | 2538 | } else { |
733f0b02 | 2539 | /* RAM case */ |
149f54b5 | 2540 | addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK; |
733f0b02 | 2541 | ptr = qemu_get_ram_ptr(addr1); |
1e78bcc1 AG |
2542 | switch (endian) { |
2543 | case DEVICE_LITTLE_ENDIAN: | |
2544 | stw_le_p(ptr, val); | |
2545 | break; | |
2546 | case DEVICE_BIG_ENDIAN: | |
2547 | stw_be_p(ptr, val); | |
2548 | break; | |
2549 | default: | |
2550 | stw_p(ptr, val); | |
2551 | break; | |
2552 | } | |
51d7a9eb | 2553 | invalidate_and_set_dirty(addr1, 2); |
733f0b02 | 2554 | } |
aab33094 FB |
2555 | } |
2556 | ||
a8170e5e | 2557 | void stw_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2558 | { |
2559 | stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); | |
2560 | } | |
2561 | ||
a8170e5e | 2562 | void stw_le_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2563 | { |
2564 | stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); | |
2565 | } | |
2566 | ||
a8170e5e | 2567 | void stw_be_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2568 | { |
2569 | stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN); | |
2570 | } | |
2571 | ||
aab33094 | 2572 | /* XXX: optimize */ |
a8170e5e | 2573 | void stq_phys(hwaddr addr, uint64_t val) |
aab33094 FB |
2574 | { |
2575 | val = tswap64(val); | |
71d2b725 | 2576 | cpu_physical_memory_write(addr, &val, 8); |
aab33094 FB |
2577 | } |
2578 | ||
a8170e5e | 2579 | void stq_le_phys(hwaddr addr, uint64_t val) |
1e78bcc1 AG |
2580 | { |
2581 | val = cpu_to_le64(val); | |
2582 | cpu_physical_memory_write(addr, &val, 8); | |
2583 | } | |
2584 | ||
a8170e5e | 2585 | void stq_be_phys(hwaddr addr, uint64_t val) |
1e78bcc1 AG |
2586 | { |
2587 | val = cpu_to_be64(val); | |
2588 | cpu_physical_memory_write(addr, &val, 8); | |
2589 | } | |
2590 | ||
5e2972fd | 2591 | /* virtual memory access for debug (includes writing to ROM) */ |
9349b4f9 | 2592 | int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, |
b448f2f3 | 2593 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
2594 | { |
2595 | int l; | |
a8170e5e | 2596 | hwaddr phys_addr; |
9b3c35e0 | 2597 | target_ulong page; |
13eb76e0 FB |
2598 | |
2599 | while (len > 0) { | |
2600 | page = addr & TARGET_PAGE_MASK; | |
2601 | phys_addr = cpu_get_phys_page_debug(env, page); | |
2602 | /* if no physical page mapped, return an error */ | |
2603 | if (phys_addr == -1) | |
2604 | return -1; | |
2605 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2606 | if (l > len) | |
2607 | l = len; | |
5e2972fd | 2608 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
5e2972fd AL |
2609 | if (is_write) |
2610 | cpu_physical_memory_write_rom(phys_addr, buf, l); | |
2611 | else | |
5e2972fd | 2612 | cpu_physical_memory_rw(phys_addr, buf, l, is_write); |
13eb76e0 FB |
2613 | len -= l; |
2614 | buf += l; | |
2615 | addr += l; | |
2616 | } | |
2617 | return 0; | |
2618 | } | |
a68fe89c | 2619 | #endif |
13eb76e0 | 2620 | |
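A hedged usage sketch of the debug accessor (the CPU state and PC variable are illustrative), in the style of a gdbstub reading guest memory through the CPU's current translation:

    /* Illustrative only: fetch 4 bytes at the guest PC. */
    uint8_t insn[4];
    if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
        /* no physical page mapped at 'pc' */
    }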
8e4a424b BS |
2621 | #if !defined(CONFIG_USER_ONLY) |
2622 | ||
2623 | /* | |
2624 | * A helper function for the _utterly broken_ virtio device model to find out if | |
2625 | * it's running on a big endian machine. Don't do this at home kids! | |
2626 | */ | |
2627 | bool virtio_is_big_endian(void); | |
2628 | bool virtio_is_big_endian(void) | |
2629 | { | |
2630 | #if defined(TARGET_WORDS_BIGENDIAN) | |
2631 | return true; | |
2632 | #else | |
2633 | return false; | |
2634 | #endif | |
2635 | } | |
2636 | ||
2637 | #endif | |
2638 | ||
76f35538 | 2639 | #ifndef CONFIG_USER_ONLY |
a8170e5e | 2640 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
76f35538 WC |
2641 | { |
2642 | MemoryRegionSection *section; | |
149f54b5 | 2643 | hwaddr l = 1; |
76f35538 | 2644 | |
149f54b5 PB |
2645 | section = address_space_translate(&address_space_memory, |
2646 | phys_addr, &phys_addr, &l, false); | |
76f35538 WC |
2647 | |
2648 | return !(memory_region_is_ram(section->mr) || | |
2649 | memory_region_is_romd(section->mr)); | |
2650 | } | |
2651 | #endif |