/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
54936004 | 48 | |
022c62cb | 49 | #include "exec/cputlb.h" |
5b6dd868 | 50 | #include "translate-all.h" |
0cac1b66 | 51 | |
022c62cb | 52 | #include "exec/memory-internal.h" |
67d95c15 | 53 | |
b35ba30f MT |
54 | #include "qemu/range.h" |
55 | ||
db7b5426 | 56 | //#define DEBUG_SUBPAGE |
1196be37 | 57 | |
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS

#define P_L2_BITS 10
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
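
/* Illustrative arithmetic (a sketch added for clarity, not from the
 * original source): for a target with TARGET_PHYS_ADDR_SPACE_BITS = 52
 * and TARGET_PAGE_BITS = 12,
 *
 *     P_L2_LEVELS = ((52 - 12 - 1) / 10) + 1 = 4
 *
 * Each level decodes P_L2_BITS = 10 bits of the page index, so four
 * levels of 1024-entry nodes cover the full 40-bit page number space.
 */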

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
123 | ||
b41aac4f LPF |
124 | #define PHYS_SECTION_UNASSIGNED 0 |
125 | #define PHYS_SECTION_NOTDIRTY 1 | |
126 | #define PHYS_SECTION_ROM 2 | |
127 | #define PHYS_SECTION_WATCH 3 | |
5312bd8b | 128 | |
9affd6fc PB |
129 | typedef struct PhysPageMap { |
130 | unsigned sections_nb; | |
131 | unsigned sections_nb_alloc; | |
132 | unsigned nodes_nb; | |
133 | unsigned nodes_nb_alloc; | |
134 | Node *nodes; | |
135 | MemoryRegionSection *sections; | |
136 | } PhysPageMap; | |
137 | ||
6092666e | 138 | static PhysPageMap *prev_map; |
9affd6fc | 139 | static PhysPageMap next_map; |
d6f2ea22 | 140 | |
e2eef170 | 141 | static void io_mem_init(void); |
62152b8a | 142 | static void memory_map_init(void); |
e2eef170 | 143 | |
1ec9b909 | 144 | static MemoryRegion io_mem_watch; |
6658ffb8 | 145 | #endif |

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2, 16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(void)
{
    unsigned i;
    uint32_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        next_map.nodes[ret][i].skip = 1;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
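
/* Illustrative sketch of a caller (hypothetical values, not from the
 * original source), assuming a section index previously obtained from
 * phys_section_add():
 *
 *     hwaddr first_page = 0x100000 >> TARGET_PAGE_BITS;
 *     phys_page_set(d, first_page, 16, section_index);
 *
 * This marks 16 consecutive pages starting at physical address 0x100000
 * as belonging to one MemoryRegionSection.  Aligned power-of-two runs are
 * stored directly at interior levels; everything else recurses down to
 * the leaf level.
 */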

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->nodes, compacted);
    }
}
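
/* Illustrative example (added for clarity, not from the original source):
 * suppose only a single page in the whole address space is mapped.  Every
 * interior node then has exactly one valid child, so after compaction each
 * single-child level is folded into its parent's skip count and a lookup
 * descends far fewer nodes than the uncompacted P_L2_LEVELS walk would.
 */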

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
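
/* Illustrative walk (added for clarity, not from the original source):
 * in the uncompacted case every entry has skip == 1, so the loop steps
 * i from P_L2_LEVELS down one level at a time, indexing each node with
 * the next 10-bit slice of the page number, until it reaches an entry
 * with skip == 0 (a leaf).  The final range_covers_byte() check guards
 * against a compacted path matching an address that the resolved section
 * does not actually cover.
 */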

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
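
/* Illustrative sketch of a caller (hypothetical values, not from the
 * original source):
 *
 *     hwaddr xlat, plen = 4;
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                0x1000, &xlat, &plen,
 *                                                false);
 *
 * On return, mr is the terminal MemoryRegion for a 4-byte read at
 * physical address 0x1000, xlat is the offset within mr, and plen may
 * have been clamped if the region (or an intervening IOMMU mapping) ends
 * before 4 bytes.
 */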

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
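
/* Illustrative sketch of a caller (hypothetical values, not from the
 * original source): a debugger stub watching 4 bytes for writes might do
 *
 *     CPUWatchpoint *wp;
 *     int ret = cpu_watchpoint_insert(env, 0x4000, 4,
 *                                     BP_GDB | BP_MEM_WRITE, &wp);
 *
 * len must be a power of two and addr aligned to it, or -EINVAL is
 * returned.  The TLB page is flushed so the next access takes the slow
 * path, where check_watchpoint() can run.
 */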

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
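
/* Illustrative example of the encoding above (added for clarity, not from
 * the original source): for writable RAM the iotlb value is the
 * page-aligned ram_addr_t with PHYS_SECTION_NOTDIRTY (1) ORed into the low
 * bits, so a TLB fill for RAM at ram_addr 0x2000 yields 0x2001.  The low
 * bits are free to carry a section number because phys_section_add()
 * asserts that section indices stay below TARGET_PAGE_SIZE.
 */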
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
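
/* Illustrative sketch (hypothetical allocator and helper names, not from
 * the original source): an accelerator that needs guest RAM in a special
 * mapping could install its own hook early in its init code:
 *
 *     static void *my_accel_ram_alloc(size_t size)
 *     {
 *         return my_accel_map_guest_ram(size);    // hypothetical helper
 *     }
 *     ...
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 *
 * After this, qemu_ram_alloc_from_ptr() calls the hook instead of
 * qemu_anon_ram_alloc() for all anonymous guest memory.
 */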

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
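
/* Illustrative example (hypothetical values, not from the original
 * source): with 4 KiB pages, a section covering [0x1800, 0x4800) is split
 * by mem_add() into an unaligned head [0x1800, 0x2000) registered as a
 * subpage, two full pages [0x2000, 0x4000) registered via
 * register_multipage(), and an unaligned tail [0x4000, 0x4800) registered
 * as another subpage.
 */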

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize) - 1; i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
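
/* Illustrative example (hypothetical values, not from the original
 * source): with existing blocks at [0x0, 0x100000) and
 * [0x200000, 0x300000), a request for 0x80000 bytes examines the gap
 * after each block.  The gap [0x100000, 0x200000) is 0x100000 bytes while
 * the gap after 0x300000 is unbounded, so the smallest gap that still
 * fits wins and find_ram_offset() returns 0x100000.
 */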

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
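
/* Illustrative sketch of a caller (hypothetical region name, not from the
 * original source): board code that backs a MemoryRegion with guest RAM
 * typically goes through memory_region_init_ram(), which ends up doing
 * the equivalent of
 *
 *     ram_addr_t offset = qemu_ram_alloc(0x100000, &mr);
 *
 * The returned offset names the block in the global ram_addr_t space; the
 * host pointer is recovered later with qemu_get_ram_ptr(offset).
 */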

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
7443b437 PB |
1457 | /* Some of the softmmu routines need to translate from a host pointer |
1458 | (typically a TLB entry) back to a ram offset. */ | |
1b5ec234 | 1459 | MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr) |
5579c7f3 | 1460 | { |
94a6b54f PB |
1461 | RAMBlock *block; |
1462 | uint8_t *host = ptr; | |
1463 | ||
868bb33f | 1464 | if (xen_enabled()) { |
e41d7c69 | 1465 | *ram_addr = xen_ram_addr_from_mapcache(ptr); |
1b5ec234 | 1466 | return qemu_get_ram_block(*ram_addr)->mr; |
712c2b41 SS |
1467 | } |
1468 | ||
23887b79 PB |
1469 | block = ram_list.mru_block; |
1470 | if (block && block->host && host - block->host < block->length) { | |
1471 | goto found; | |
1472 | } | |
1473 | ||
a3161038 | 1474 | QTAILQ_FOREACH(block, &ram_list.blocks, next) { |
432d268c JN |
1475 | /* This case append when the block is not mapped. */ |
1476 | if (block->host == NULL) { | |
1477 | continue; | |
1478 | } | |
f471a17e | 1479 | if (host - block->host < block->length) { |
23887b79 | 1480 | goto found; |
f471a17e | 1481 | } |
94a6b54f | 1482 | } |
432d268c | 1483 | |
1b5ec234 | 1484 | return NULL; |
23887b79 PB |
1485 | |
1486 | found: | |
1487 | *ram_addr = block->offset + (host - block->host); | |
1b5ec234 | 1488 | return block->mr; |
e890261f | 1489 | } |
f471a17e | 1490 | |
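/* Hedged sketch (editor's addition): the lookup above is the inverse of
 * qemu_get_ram_ptr(), so a host pointer into guest RAM round-trips back
 * to its ram_addr_t and owning MemoryRegion. */
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &back);
    assert(mr != NULL && back == addr);
}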
a8170e5e | 1491 | static void notdirty_mem_write(void *opaque, hwaddr ram_addr, |
0e0df1e2 | 1492 | uint64_t val, unsigned size) |
9fa3e853 | 1493 | { |
3a7d929e | 1494 | int dirty_flags; |
f7c11b53 | 1495 | dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
3a7d929e | 1496 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { |
0e0df1e2 | 1497 | tb_invalidate_phys_page_fast(ram_addr, size); |
f7c11b53 | 1498 | dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); |
3a7d929e | 1499 | } |
0e0df1e2 AK |
1500 | switch (size) { |
1501 | case 1: | |
1502 | stb_p(qemu_get_ram_ptr(ram_addr), val); | |
1503 | break; | |
1504 | case 2: | |
1505 | stw_p(qemu_get_ram_ptr(ram_addr), val); | |
1506 | break; | |
1507 | case 4: | |
1508 | stl_p(qemu_get_ram_ptr(ram_addr), val); | |
1509 | break; | |
1510 | default: | |
1511 | abort(); | |
3a7d929e | 1512 | } |
f23db169 | 1513 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); |
f7c11b53 | 1514 | cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); |
f23db169 FB |
1515 | /* we remove the notdirty callback only if the code has been |
1516 | flushed */ | |
4917cf44 AF |
1517 | if (dirty_flags == 0xff) { |
1518 | CPUArchState *env = current_cpu->env_ptr; | |
1519 | tlb_set_dirty(env, env->mem_io_vaddr); | |
1520 | } | |
9fa3e853 FB |
1521 | } |
1522 | ||
b018ddf6 PB |
1523 | static bool notdirty_mem_accepts(void *opaque, hwaddr addr, |
1524 | unsigned size, bool is_write) | |
1525 | { | |
1526 | return is_write; | |
1527 | } | |
1528 | ||
0e0df1e2 | 1529 | static const MemoryRegionOps notdirty_mem_ops = { |
0e0df1e2 | 1530 | .write = notdirty_mem_write, |
b018ddf6 | 1531 | .valid.accepts = notdirty_mem_accepts, |
0e0df1e2 | 1532 | .endianness = DEVICE_NATIVE_ENDIAN, |
1ccde1cb FB |
1533 | }; |
1534 | ||
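/* A minimal standalone sketch of the flag arithmetic in
 * notdirty_mem_write(): once every dirty bit is set (0xff), the slow
 * notdirty path is disarmed. 0x02 is an assumed value for
 * CODE_DIRTY_FLAG, used here only for illustration. */
#include <assert.h>
#define EXAMPLE_CODE_DIRTY_FLAG 0x02
int main(void)
{
    int dirty_flags = EXAMPLE_CODE_DIRTY_FLAG;        /* code already flushed */
    dirty_flags |= (0xff & ~EXAMPLE_CODE_DIRTY_FLAG); /* a data write lands */
    assert(dirty_flags == 0xff);                      /* TLB entry goes dirty */
    return 0;
}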
0f459d16 | 1535 | /* Generate a debug exception if a watchpoint has been hit. */ |
b4051334 | 1536 | static void check_watchpoint(int offset, int len_mask, int flags) |
0f459d16 | 1537 | { |
4917cf44 | 1538 | CPUArchState *env = current_cpu->env_ptr; |
06d55cc1 | 1539 | target_ulong pc, cs_base; |
0f459d16 | 1540 | target_ulong vaddr; |
a1d1bb31 | 1541 | CPUWatchpoint *wp; |
06d55cc1 | 1542 | int cpu_flags; |
0f459d16 | 1543 | |
06d55cc1 AL |
1544 | if (env->watchpoint_hit) { |
1545 | /* We re-entered the check after replacing the TB. Now raise | |
1546 | * the debug interrupt so that it will trigger after the |
1547 | * current instruction. */ | |
c3affe56 | 1548 | cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG); |
06d55cc1 AL |
1549 | return; |
1550 | } | |
2e70f6ef | 1551 | vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; |
72cf2d4f | 1552 | QTAILQ_FOREACH(wp, &env->watchpoints, entry) { |
b4051334 AL |
1553 | if ((vaddr == (wp->vaddr & len_mask) || |
1554 | (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { | |
6e140f28 AL |
1555 | wp->flags |= BP_WATCHPOINT_HIT; |
1556 | if (!env->watchpoint_hit) { | |
1557 | env->watchpoint_hit = wp; | |
5a316526 | 1558 | tb_check_watchpoint(env); |
6e140f28 AL |
1559 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
1560 | env->exception_index = EXCP_DEBUG; | |
488d6577 | 1561 | cpu_loop_exit(env); |
6e140f28 AL |
1562 | } else { |
1563 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); | |
1564 | tb_gen_code(env, pc, cs_base, cpu_flags, 1); | |
488d6577 | 1565 | cpu_resume_from_signal(env, NULL); |
6e140f28 | 1566 | } |
06d55cc1 | 1567 | } |
6e140f28 AL |
1568 | } else { |
1569 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
0f459d16 PB |
1570 | } |
1571 | } | |
1572 | } | |
1573 | ||
6658ffb8 PB |
1574 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
1575 | so these check for a hit then pass through to the normal out-of-line | |
1576 | phys routines. */ | |
a8170e5e | 1577 | static uint64_t watch_mem_read(void *opaque, hwaddr addr, |
1ec9b909 | 1578 | unsigned size) |
6658ffb8 | 1579 | { |
1ec9b909 AK |
1580 | check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ); |
1581 | switch (size) { | |
1582 | case 1: return ldub_phys(addr); | |
1583 | case 2: return lduw_phys(addr); | |
1584 | case 4: return ldl_phys(addr); | |
1585 | default: abort(); | |
1586 | } | |
6658ffb8 PB |
1587 | } |
1588 | ||
a8170e5e | 1589 | static void watch_mem_write(void *opaque, hwaddr addr, |
1ec9b909 | 1590 | uint64_t val, unsigned size) |
6658ffb8 | 1591 | { |
1ec9b909 AK |
1592 | check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE); |
1593 | switch (size) { | |
67364150 MF |
1594 | case 1: |
1595 | stb_phys(addr, val); | |
1596 | break; | |
1597 | case 2: | |
1598 | stw_phys(addr, val); | |
1599 | break; | |
1600 | case 4: | |
1601 | stl_phys(addr, val); | |
1602 | break; | |
1ec9b909 AK |
1603 | default: abort(); |
1604 | } | |
6658ffb8 PB |
1605 | } |
1606 | ||
1ec9b909 AK |
1607 | static const MemoryRegionOps watch_mem_ops = { |
1608 | .read = watch_mem_read, | |
1609 | .write = watch_mem_write, | |
1610 | .endianness = DEVICE_NATIVE_ENDIAN, | |
6658ffb8 | 1611 | }; |
6658ffb8 | 1612 | |
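/* Hedged usage sketch (editor's addition), assuming the
 * CPUArchState-based cpu_watchpoint_insert() of this era: arming a
 * 4-byte write watchpoint makes the TLB route guest stores through
 * watch_mem_ops so check_watchpoint() can raise EXCP_DEBUG. */
static void example_arm_write_watch(CPUArchState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}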
a8170e5e | 1613 | static uint64_t subpage_read(void *opaque, hwaddr addr, |
70c68e44 | 1614 | unsigned len) |
db7b5426 | 1615 | { |
acc9d80b JK |
1616 | subpage_t *subpage = opaque; |
1617 | uint8_t buf[4]; | |
791af8c8 | 1618 | |
db7b5426 | 1619 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 1620 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, |
acc9d80b | 1621 | subpage, len, addr); |
db7b5426 | 1622 | #endif |
acc9d80b JK |
1623 | address_space_read(subpage->as, addr + subpage->base, buf, len); |
1624 | switch (len) { | |
1625 | case 1: | |
1626 | return ldub_p(buf); | |
1627 | case 2: | |
1628 | return lduw_p(buf); | |
1629 | case 4: | |
1630 | return ldl_p(buf); | |
1631 | default: | |
1632 | abort(); | |
1633 | } | |
db7b5426 BS |
1634 | } |
1635 | ||
a8170e5e | 1636 | static void subpage_write(void *opaque, hwaddr addr, |
70c68e44 | 1637 | uint64_t value, unsigned len) |
db7b5426 | 1638 | { |
acc9d80b JK |
1639 | subpage_t *subpage = opaque; |
1640 | uint8_t buf[4]; | |
1641 | ||
db7b5426 | 1642 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 1643 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx |
acc9d80b JK |
1644 | " value %"PRIx64"\n", |
1645 | __func__, subpage, len, addr, value); | |
db7b5426 | 1646 | #endif |
acc9d80b JK |
1647 | switch (len) { |
1648 | case 1: | |
1649 | stb_p(buf, value); | |
1650 | break; | |
1651 | case 2: | |
1652 | stw_p(buf, value); | |
1653 | break; | |
1654 | case 4: | |
1655 | stl_p(buf, value); | |
1656 | break; | |
1657 | default: | |
1658 | abort(); | |
1659 | } | |
1660 | address_space_write(subpage->as, addr + subpage->base, buf, len); | |
db7b5426 BS |
1661 | } |
1662 | ||
c353e4cc | 1663 | static bool subpage_accepts(void *opaque, hwaddr addr, |
016e9d62 | 1664 | unsigned len, bool is_write) |
c353e4cc | 1665 | { |
acc9d80b | 1666 | subpage_t *subpage = opaque; |
c353e4cc | 1667 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 1668 | printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", |
acc9d80b | 1669 | __func__, subpage, is_write ? 'w' : 'r', len, addr); |
c353e4cc PB |
1670 | #endif |
1671 | ||
acc9d80b | 1672 | return address_space_access_valid(subpage->as, addr + subpage->base, |
016e9d62 | 1673 | len, is_write); |
c353e4cc PB |
1674 | } |
1675 | ||
70c68e44 AK |
1676 | static const MemoryRegionOps subpage_ops = { |
1677 | .read = subpage_read, | |
1678 | .write = subpage_write, | |
c353e4cc | 1679 | .valid.accepts = subpage_accepts, |
70c68e44 | 1680 | .endianness = DEVICE_NATIVE_ENDIAN, |
db7b5426 BS |
1681 | }; |
1682 | ||
c227f099 | 1683 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
5312bd8b | 1684 | uint16_t section) |
db7b5426 BS |
1685 | { |
1686 | int idx, eidx; | |
1687 | ||
1688 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) | |
1689 | return -1; | |
1690 | idx = SUBPAGE_IDX(start); | |
1691 | eidx = SUBPAGE_IDX(end); | |
1692 | #if defined(DEBUG_SUBPAGE) | |
016e9d62 AK |
1693 | printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", |
1694 | __func__, mmio, start, end, idx, eidx, section); | |
db7b5426 | 1695 | #endif |
db7b5426 | 1696 | for (; idx <= eidx; idx++) { |
5312bd8b | 1697 | mmio->sub_section[idx] = section; |
db7b5426 BS |
1698 | } |
1699 | ||
1700 | return 0; | |
1701 | } | |
1702 | ||
acc9d80b | 1703 | static subpage_t *subpage_init(AddressSpace *as, hwaddr base) |
db7b5426 | 1704 | { |
c227f099 | 1705 | subpage_t *mmio; |
db7b5426 | 1706 | |
7267c094 | 1707 | mmio = g_malloc0(sizeof(subpage_t)); |
1eec614b | 1708 | |
acc9d80b | 1709 | mmio->as = as; |
1eec614b | 1710 | mmio->base = base; |
2c9b15ca | 1711 | memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, |
70c68e44 | 1712 | "subpage", TARGET_PAGE_SIZE); |
b3b00c78 | 1713 | mmio->iomem.subpage = true; |
db7b5426 | 1714 | #if defined(DEBUG_SUBPAGE) |
016e9d62 AK |
1715 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, |
1716 | mmio, base, TARGET_PAGE_SIZE); | |
db7b5426 | 1717 | #endif |
b41aac4f | 1718 | subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); |
db7b5426 BS |
1719 | |
1720 | return mmio; | |
1721 | } | |
1722 | ||
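/* Hedged sketch (editor's addition) of how a dispatch path could use
 * the two helpers above when a MemoryRegionSection covers only part of
 * a target page: the partial range gets its own section id, the rest of
 * the page stays PHYS_SECTION_UNASSIGNED. start/end are byte offsets
 * within the page; all values are hypothetical. */
static subpage_t *example_map_partial_page(AddressSpace *as,
                                           hwaddr page_base,
                                           uint32_t start, uint32_t end,
                                           uint16_t section_id)
{
    subpage_t *sp = subpage_init(as, page_base);
    subpage_register(sp, start, end, section_id);
    return sp;
}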
5312bd8b AK |
1723 | static uint16_t dummy_section(MemoryRegion *mr) |
1724 | { | |
1725 | MemoryRegionSection section = { | |
1726 | .mr = mr, | |
1727 | .offset_within_address_space = 0, | |
1728 | .offset_within_region = 0, | |
052e87b0 | 1729 | .size = int128_2_64(), |
5312bd8b AK |
1730 | }; |
1731 | ||
1732 | return phys_section_add(&section); |
1733 | } | |
1734 | ||
a8170e5e | 1735 | MemoryRegion *iotlb_to_region(hwaddr index) |
aa102231 | 1736 | { |
0475d94f | 1737 | return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr; |
aa102231 AK |
1738 | } |
1739 | ||
e9179ce1 AK |
1740 | static void io_mem_init(void) |
1741 | { | |
2c9b15ca PB |
1742 | memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX); |
1743 | memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, | |
0e0df1e2 | 1744 | "unassigned", UINT64_MAX); |
2c9b15ca | 1745 | memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, |
0e0df1e2 | 1746 | "notdirty", UINT64_MAX); |
2c9b15ca | 1747 | memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, |
1ec9b909 | 1748 | "watch", UINT64_MAX); |
e9179ce1 AK |
1749 | } |
1750 | ||
ac1970fb | 1751 | static void mem_begin(MemoryListener *listener) |
00752703 PB |
1752 | { |
1753 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); | |
1754 | AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1); | |
1755 | ||
9736e55b | 1756 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; |
00752703 PB |
1757 | d->as = as; |
1758 | as->next_dispatch = d; | |
1759 | } | |
1760 | ||
1761 | static void mem_commit(MemoryListener *listener) | |
ac1970fb | 1762 | { |
89ae337a | 1763 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); |
0475d94f PB |
1764 | AddressSpaceDispatch *cur = as->dispatch; |
1765 | AddressSpaceDispatch *next = as->next_dispatch; | |
1766 | ||
1767 | next->nodes = next_map.nodes; | |
1768 | next->sections = next_map.sections; | |
ac1970fb | 1769 | |
b35ba30f MT |
1770 | phys_page_compact_all(next, next_map.nodes_nb); |
1771 | ||
0475d94f PB |
1772 | as->dispatch = next; |
1773 | g_free(cur); | |
ac1970fb AK |
1774 | } |
1775 | ||
50c1e149 AK |
1776 | static void core_begin(MemoryListener *listener) |
1777 | { | |
b41aac4f LPF |
1778 | uint16_t n; |
1779 | ||
6092666e PB |
1780 | prev_map = g_new(PhysPageMap, 1); |
1781 | *prev_map = next_map; | |
1782 | ||
9affd6fc | 1783 | memset(&next_map, 0, sizeof(next_map)); |
b41aac4f LPF |
1784 | n = dummy_section(&io_mem_unassigned); |
1785 | assert(n == PHYS_SECTION_UNASSIGNED); | |
1786 | n = dummy_section(&io_mem_notdirty); | |
1787 | assert(n == PHYS_SECTION_NOTDIRTY); | |
1788 | n = dummy_section(&io_mem_rom); | |
1789 | assert(n == PHYS_SECTION_ROM); | |
1790 | n = dummy_section(&io_mem_watch); | |
1791 | assert(n == PHYS_SECTION_WATCH); | |
50c1e149 AK |
1792 | } |
1793 | ||
9affd6fc PB |
1794 | /* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1795 | * All AddressSpaceDispatch instances have switched to the next map. | |
1796 | */ | |
1797 | static void core_commit(MemoryListener *listener) | |
1798 | { | |
6092666e | 1799 | phys_sections_free(prev_map); |
9affd6fc PB |
1800 | } |
1801 | ||
1d71148e | 1802 | static void tcg_commit(MemoryListener *listener) |
50c1e149 | 1803 | { |
182735ef | 1804 | CPUState *cpu; |
117712c3 AK |
1805 | |
1806 | /* since each CPU stores ram addresses in its TLB cache, we must | |
1807 | reset the modified entries */ | |
1808 | /* XXX: slow ! */ | |
bdc44640 | 1809 | CPU_FOREACH(cpu) { |
182735ef AF |
1810 | CPUArchState *env = cpu->env_ptr; |
1811 | ||
117712c3 AK |
1812 | tlb_flush(env, 1); |
1813 | } | |
50c1e149 AK |
1814 | } |
1815 | ||
93632747 AK |
1816 | static void core_log_global_start(MemoryListener *listener) |
1817 | { | |
1818 | cpu_physical_memory_set_dirty_tracking(1); | |
1819 | } | |
1820 | ||
1821 | static void core_log_global_stop(MemoryListener *listener) | |
1822 | { | |
1823 | cpu_physical_memory_set_dirty_tracking(0); | |
1824 | } | |
1825 | ||
93632747 | 1826 | static MemoryListener core_memory_listener = { |
50c1e149 | 1827 | .begin = core_begin, |
9affd6fc | 1828 | .commit = core_commit, |
93632747 AK |
1829 | .log_global_start = core_log_global_start, |
1830 | .log_global_stop = core_log_global_stop, | |
ac1970fb | 1831 | .priority = 1, |
93632747 AK |
1832 | }; |
1833 | ||
1d71148e AK |
1834 | static MemoryListener tcg_memory_listener = { |
1835 | .commit = tcg_commit, | |
1836 | }; | |
1837 | ||
ac1970fb AK |
1838 | void address_space_init_dispatch(AddressSpace *as) |
1839 | { | |
00752703 | 1840 | as->dispatch = NULL; |
89ae337a | 1841 | as->dispatch_listener = (MemoryListener) { |
ac1970fb | 1842 | .begin = mem_begin, |
00752703 | 1843 | .commit = mem_commit, |
ac1970fb AK |
1844 | .region_add = mem_add, |
1845 | .region_nop = mem_add, | |
1846 | .priority = 0, | |
1847 | }; | |
89ae337a | 1848 | memory_listener_register(&as->dispatch_listener, as); |
ac1970fb AK |
1849 | } |
1850 | ||
83f3c251 AK |
1851 | void address_space_destroy_dispatch(AddressSpace *as) |
1852 | { | |
1853 | AddressSpaceDispatch *d = as->dispatch; | |
1854 | ||
89ae337a | 1855 | memory_listener_unregister(&as->dispatch_listener); |
83f3c251 AK |
1856 | g_free(d); |
1857 | as->dispatch = NULL; | |
1858 | } | |
1859 | ||
62152b8a AK |
1860 | static void memory_map_init(void) |
1861 | { | |
7267c094 | 1862 | system_memory = g_malloc(sizeof(*system_memory)); |
03f49957 PB |
1863 | |
1864 | assert(ADDR_SPACE_BITS <= 64); | |
1865 | ||
1866 | memory_region_init(system_memory, NULL, "system", | |
1867 | ADDR_SPACE_BITS == 64 ? | |
1868 | UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS)); | |
7dca8043 | 1869 | address_space_init(&address_space_memory, system_memory, "memory"); |
309cb471 | 1870 | |
7267c094 | 1871 | system_io = g_malloc(sizeof(*system_io)); |
3bb28b72 JK |
1872 | memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", |
1873 | 65536); | |
7dca8043 | 1874 | address_space_init(&address_space_io, system_io, "I/O"); |
93632747 | 1875 | |
f6790af6 | 1876 | memory_listener_register(&core_memory_listener, &address_space_memory); |
2641689a LG |
1877 | if (tcg_enabled()) { |
1878 | memory_listener_register(&tcg_memory_listener, &address_space_memory); | |
1879 | } | |
62152b8a AK |
1880 | } |
1881 | ||
1882 | MemoryRegion *get_system_memory(void) | |
1883 | { | |
1884 | return system_memory; | |
1885 | } | |
1886 | ||
309cb471 AK |
1887 | MemoryRegion *get_system_io(void) |
1888 | { | |
1889 | return system_io; | |
1890 | } | |
1891 | ||
e2eef170 PB |
1892 | #endif /* !defined(CONFIG_USER_ONLY) */ |
1893 | ||
13eb76e0 FB |
1894 | /* physical memory access (slow version, mainly for debug) */ |
1895 | #if defined(CONFIG_USER_ONLY) | |
f17ec444 | 1896 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
a68fe89c | 1897 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
1898 | { |
1899 | int l, flags; | |
1900 | target_ulong page; | |
53a5960a | 1901 | void * p; |
13eb76e0 FB |
1902 | |
1903 | while (len > 0) { | |
1904 | page = addr & TARGET_PAGE_MASK; | |
1905 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1906 | if (l > len) | |
1907 | l = len; | |
1908 | flags = page_get_flags(page); | |
1909 | if (!(flags & PAGE_VALID)) | |
a68fe89c | 1910 | return -1; |
13eb76e0 FB |
1911 | if (is_write) { |
1912 | if (!(flags & PAGE_WRITE)) | |
a68fe89c | 1913 | return -1; |
579a97f7 | 1914 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 1915 | if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) |
a68fe89c | 1916 | return -1; |
72fb7daa AJ |
1917 | memcpy(p, buf, l); |
1918 | unlock_user(p, addr, l); | |
13eb76e0 FB |
1919 | } else { |
1920 | if (!(flags & PAGE_READ)) | |
a68fe89c | 1921 | return -1; |
579a97f7 | 1922 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 1923 | if (!(p = lock_user(VERIFY_READ, addr, l, 1))) |
a68fe89c | 1924 | return -1; |
72fb7daa | 1925 | memcpy(buf, p, l); |
5b257578 | 1926 | unlock_user(p, addr, 0); |
13eb76e0 FB |
1927 | } |
1928 | len -= l; | |
1929 | buf += l; | |
1930 | addr += l; | |
1931 | } | |
a68fe89c | 1932 | return 0; |
13eb76e0 | 1933 | } |
8df1cd07 | 1934 | |
13eb76e0 | 1935 | #else |
51d7a9eb | 1936 | |
a8170e5e AK |
1937 | static void invalidate_and_set_dirty(hwaddr addr, |
1938 | hwaddr length) | |
51d7a9eb AP |
1939 | { |
1940 | if (!cpu_physical_memory_is_dirty(addr)) { | |
1941 | /* invalidate code */ | |
1942 | tb_invalidate_phys_page_range(addr, addr + length, 0); | |
1943 | /* set dirty bit */ | |
1944 | cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG)); | |
1945 | } | |
e226939d | 1946 | xen_modified_memory(addr, length); |
51d7a9eb AP |
1947 | } |
1948 | ||
2bbfa05d PB |
1949 | static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) |
1950 | { | |
1951 | if (memory_region_is_ram(mr)) { | |
1952 | return !(is_write && mr->readonly); | |
1953 | } | |
1954 | if (memory_region_is_romd(mr)) { | |
1955 | return !is_write; | |
1956 | } | |
1957 | ||
1958 | return false; | |
1959 | } | |
1960 | ||
23326164 | 1961 | static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) |
82f2563f | 1962 | { |
e1622f4b | 1963 | unsigned access_size_max = mr->ops->valid.max_access_size; |
23326164 RH |
1964 | |
1965 | /* Regions are assumed to support 1-4 byte accesses unless | |
1966 | otherwise specified. */ | |
23326164 RH |
1967 | if (access_size_max == 0) { |
1968 | access_size_max = 4; | |
1969 | } | |
1970 | ||
1971 | /* Bound the maximum access by the alignment of the address. */ | |
1972 | if (!mr->ops->impl.unaligned) { | |
1973 | unsigned align_size_max = addr & -addr; | |
1974 | if (align_size_max != 0 && align_size_max < access_size_max) { | |
1975 | access_size_max = align_size_max; | |
1976 | } | |
82f2563f | 1977 | } |
23326164 RH |
1978 | |
1979 | /* Don't attempt accesses larger than the maximum. */ | |
1980 | if (l > access_size_max) { | |
1981 | l = access_size_max; | |
82f2563f | 1982 | } |
098178f2 PB |
1983 | if (l & (l - 1)) { |
1984 | l = 1 << (qemu_fls(l) - 1); | |
1985 | } | |
23326164 RH |
1986 | |
1987 | return l; | |
82f2563f PB |
1988 | } |
1989 | ||
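/* Standalone worked example of the bounding rules above, with values
 * assumed for illustration: addr & -addr isolates the lowest set bit of
 * addr, i.e. its largest natural alignment. */
#include <assert.h>
#include <stdint.h>
int main(void)
{
    uint64_t addr = 0x6;            /* hypothetical unaligned address */
    unsigned l = 8;                 /* requested access length */
    unsigned max = 4;               /* default when valid.max_access_size == 0 */
    uint64_t align = addr & -addr;  /* == 2 for 0x6 */
    if (align != 0 && align < max) {
        max = align;
    }
    if (l > max) {
        l = max;
    }
    assert(l == 2);                 /* the 8-byte access gets split up */
    return 0;
}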
fd8aaa76 | 1990 | bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf, |
ac1970fb | 1991 | int len, bool is_write) |
13eb76e0 | 1992 | { |
149f54b5 | 1993 | hwaddr l; |
13eb76e0 | 1994 | uint8_t *ptr; |
791af8c8 | 1995 | uint64_t val; |
149f54b5 | 1996 | hwaddr addr1; |
5c8a00ce | 1997 | MemoryRegion *mr; |
fd8aaa76 | 1998 | bool error = false; |
3b46e624 | 1999 | |
13eb76e0 | 2000 | while (len > 0) { |
149f54b5 | 2001 | l = len; |
5c8a00ce | 2002 | mr = address_space_translate(as, addr, &addr1, &l, is_write); |
3b46e624 | 2003 | |
13eb76e0 | 2004 | if (is_write) { |
5c8a00ce PB |
2005 | if (!memory_access_is_direct(mr, is_write)) { |
2006 | l = memory_access_size(mr, l, addr1); | |
4917cf44 | 2007 | /* XXX: could force current_cpu to NULL to avoid |
6a00d601 | 2008 | potential bugs */ |
23326164 RH |
2009 | switch (l) { |
2010 | case 8: | |
2011 | /* 64 bit write access */ | |
2012 | val = ldq_p(buf); | |
2013 | error |= io_mem_write(mr, addr1, val, 8); | |
2014 | break; | |
2015 | case 4: | |
1c213d19 | 2016 | /* 32 bit write access */ |
c27004ec | 2017 | val = ldl_p(buf); |
5c8a00ce | 2018 | error |= io_mem_write(mr, addr1, val, 4); |
23326164 RH |
2019 | break; |
2020 | case 2: | |
1c213d19 | 2021 | /* 16 bit write access */ |
c27004ec | 2022 | val = lduw_p(buf); |
5c8a00ce | 2023 | error |= io_mem_write(mr, addr1, val, 2); |
23326164 RH |
2024 | break; |
2025 | case 1: | |
1c213d19 | 2026 | /* 8 bit write access */ |
c27004ec | 2027 | val = ldub_p(buf); |
5c8a00ce | 2028 | error |= io_mem_write(mr, addr1, val, 1); |
23326164 RH |
2029 | break; |
2030 | default: | |
2031 | abort(); | |
13eb76e0 | 2032 | } |
2bbfa05d | 2033 | } else { |
5c8a00ce | 2034 | addr1 += memory_region_get_ram_addr(mr); |
13eb76e0 | 2035 | /* RAM case */ |
5579c7f3 | 2036 | ptr = qemu_get_ram_ptr(addr1); |
13eb76e0 | 2037 | memcpy(ptr, buf, l); |
51d7a9eb | 2038 | invalidate_and_set_dirty(addr1, l); |
13eb76e0 FB |
2039 | } |
2040 | } else { | |
5c8a00ce | 2041 | if (!memory_access_is_direct(mr, is_write)) { |
13eb76e0 | 2042 | /* I/O case */ |
5c8a00ce | 2043 | l = memory_access_size(mr, l, addr1); |
23326164 RH |
2044 | switch (l) { |
2045 | case 8: | |
2046 | /* 64 bit read access */ | |
2047 | error |= io_mem_read(mr, addr1, &val, 8); | |
2048 | stq_p(buf, val); | |
2049 | break; | |
2050 | case 4: | |
13eb76e0 | 2051 | /* 32 bit read access */ |
5c8a00ce | 2052 | error |= io_mem_read(mr, addr1, &val, 4); |
c27004ec | 2053 | stl_p(buf, val); |
23326164 RH |
2054 | break; |
2055 | case 2: | |
13eb76e0 | 2056 | /* 16 bit read access */ |
5c8a00ce | 2057 | error |= io_mem_read(mr, addr1, &val, 2); |
c27004ec | 2058 | stw_p(buf, val); |
23326164 RH |
2059 | break; |
2060 | case 1: | |
1c213d19 | 2061 | /* 8 bit read access */ |
5c8a00ce | 2062 | error |= io_mem_read(mr, addr1, &val, 1); |
c27004ec | 2063 | stb_p(buf, val); |
23326164 RH |
2064 | break; |
2065 | default: | |
2066 | abort(); | |
13eb76e0 FB |
2067 | } |
2068 | } else { | |
2069 | /* RAM case */ | |
5c8a00ce | 2070 | ptr = qemu_get_ram_ptr(mr->ram_addr + addr1); |
f3705d53 | 2071 | memcpy(buf, ptr, l); |
13eb76e0 FB |
2072 | } |
2073 | } | |
2074 | len -= l; | |
2075 | buf += l; | |
2076 | addr += l; | |
2077 | } | |
fd8aaa76 PB |
2078 | |
2079 | return error; | |
13eb76e0 | 2080 | } |
8df1cd07 | 2081 | |
fd8aaa76 | 2082 | bool address_space_write(AddressSpace *as, hwaddr addr, |
ac1970fb AK |
2083 | const uint8_t *buf, int len) |
2084 | { | |
fd8aaa76 | 2085 | return address_space_rw(as, addr, (uint8_t *)buf, len, true); |
ac1970fb AK |
2086 | } |
2087 | ||
fd8aaa76 | 2088 | bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len) |
ac1970fb | 2089 | { |
fd8aaa76 | 2090 | return address_space_rw(as, addr, buf, len, false); |
ac1970fb AK |
2091 | } |
2092 | ||
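/* Hedged usage sketch (editor's addition): a device model pulling a
 * 16-byte descriptor out of guest memory; address_space_read() returns
 * true if any sub-access failed. "desc_gpa" is hypothetical. */
static bool example_read_descriptor(hwaddr desc_gpa, uint8_t *desc)
{
    return !address_space_read(&address_space_memory, desc_gpa, desc, 16);
}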
2093 | ||
a8170e5e | 2094 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
ac1970fb AK |
2095 | int len, int is_write) |
2096 | { | |
fd8aaa76 | 2097 | address_space_rw(&address_space_memory, addr, buf, len, is_write); |
ac1970fb AK |
2098 | } |
2099 | ||
d0ecd2aa | 2100 | /* used for ROM loading: can write in RAM and ROM */ |
a8170e5e | 2101 | void cpu_physical_memory_write_rom(hwaddr addr, |
d0ecd2aa FB |
2102 | const uint8_t *buf, int len) |
2103 | { | |
149f54b5 | 2104 | hwaddr l; |
d0ecd2aa | 2105 | uint8_t *ptr; |
149f54b5 | 2106 | hwaddr addr1; |
5c8a00ce | 2107 | MemoryRegion *mr; |
3b46e624 | 2108 | |
d0ecd2aa | 2109 | while (len > 0) { |
149f54b5 | 2110 | l = len; |
5c8a00ce PB |
2111 | mr = address_space_translate(&address_space_memory, |
2112 | addr, &addr1, &l, true); | |
3b46e624 | 2113 | |
5c8a00ce PB |
2114 | if (!(memory_region_is_ram(mr) || |
2115 | memory_region_is_romd(mr))) { | |
d0ecd2aa FB |
2116 | /* do nothing */ |
2117 | } else { | |
5c8a00ce | 2118 | addr1 += memory_region_get_ram_addr(mr); |
d0ecd2aa | 2119 | /* ROM/RAM case */ |
5579c7f3 | 2120 | ptr = qemu_get_ram_ptr(addr1); |
d0ecd2aa | 2121 | memcpy(ptr, buf, l); |
51d7a9eb | 2122 | invalidate_and_set_dirty(addr1, l); |
d0ecd2aa FB |
2123 | } |
2124 | len -= l; | |
2125 | buf += l; | |
2126 | addr += l; | |
2127 | } | |
2128 | } | |
2129 | ||
6d16c2f8 | 2130 | typedef struct { |
d3e71559 | 2131 | MemoryRegion *mr; |
6d16c2f8 | 2132 | void *buffer; |
a8170e5e AK |
2133 | hwaddr addr; |
2134 | hwaddr len; | |
6d16c2f8 AL |
2135 | } BounceBuffer; |
2136 | ||
2137 | static BounceBuffer bounce; | |
2138 | ||
ba223c29 AL |
2139 | typedef struct MapClient { |
2140 | void *opaque; | |
2141 | void (*callback)(void *opaque); | |
72cf2d4f | 2142 | QLIST_ENTRY(MapClient) link; |
ba223c29 AL |
2143 | } MapClient; |
2144 | ||
72cf2d4f BS |
2145 | static QLIST_HEAD(map_client_list, MapClient) map_client_list |
2146 | = QLIST_HEAD_INITIALIZER(map_client_list); | |
ba223c29 AL |
2147 | |
2148 | void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) | |
2149 | { | |
7267c094 | 2150 | MapClient *client = g_malloc(sizeof(*client)); |
ba223c29 AL |
2151 | |
2152 | client->opaque = opaque; | |
2153 | client->callback = callback; | |
72cf2d4f | 2154 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
ba223c29 AL |
2155 | return client; |
2156 | } | |
2157 | ||
8b9c99d9 | 2158 | static void cpu_unregister_map_client(void *_client) |
ba223c29 AL |
2159 | { |
2160 | MapClient *client = (MapClient *)_client; | |
2161 | ||
72cf2d4f | 2162 | QLIST_REMOVE(client, link); |
7267c094 | 2163 | g_free(client); |
ba223c29 AL |
2164 | } |
2165 | ||
2166 | static void cpu_notify_map_clients(void) | |
2167 | { | |
2168 | MapClient *client; | |
2169 | ||
72cf2d4f BS |
2170 | while (!QLIST_EMPTY(&map_client_list)) { |
2171 | client = QLIST_FIRST(&map_client_list); | |
ba223c29 | 2172 | client->callback(client->opaque); |
34d5e948 | 2173 | cpu_unregister_map_client(client); |
ba223c29 AL |
2174 | } |
2175 | } | |
2176 | ||
51644ab7 PB |
2177 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) |
2178 | { | |
5c8a00ce | 2179 | MemoryRegion *mr; |
51644ab7 PB |
2180 | hwaddr l, xlat; |
2181 | ||
2182 | while (len > 0) { | |
2183 | l = len; | |
5c8a00ce PB |
2184 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
2185 | if (!memory_access_is_direct(mr, is_write)) { | |
2186 | l = memory_access_size(mr, l, addr); | |
2187 | if (!memory_region_access_valid(mr, xlat, l, is_write)) { | |
51644ab7 PB |
2188 | return false; |
2189 | } | |
2190 | } | |
2191 | ||
2192 | len -= l; | |
2193 | addr += l; | |
2194 | } | |
2195 | return true; | |
2196 | } | |
2197 | ||
6d16c2f8 AL |
2198 | /* Map a physical memory region into a host virtual address. |
2199 | * May map a subset of the requested range, given by and returned in *plen. | |
2200 | * May return NULL if resources needed to perform the mapping are exhausted. | |
2201 | * Use only for reads OR writes - not for read-modify-write operations. | |
ba223c29 AL |
2202 | * Use cpu_register_map_client() to know when retrying the map operation is |
2203 | * likely to succeed. | |
6d16c2f8 | 2204 | */ |
ac1970fb | 2205 | void *address_space_map(AddressSpace *as, |
a8170e5e AK |
2206 | hwaddr addr, |
2207 | hwaddr *plen, | |
ac1970fb | 2208 | bool is_write) |
6d16c2f8 | 2209 | { |
a8170e5e | 2210 | hwaddr len = *plen; |
e3127ae0 PB |
2211 | hwaddr done = 0; |
2212 | hwaddr l, xlat, base; | |
2213 | MemoryRegion *mr, *this_mr; | |
2214 | ram_addr_t raddr; | |
6d16c2f8 | 2215 | |
e3127ae0 PB |
2216 | if (len == 0) { |
2217 | return NULL; | |
2218 | } | |
38bee5dc | 2219 | |
e3127ae0 PB |
2220 | l = len; |
2221 | mr = address_space_translate(as, addr, &xlat, &l, is_write); | |
2222 | if (!memory_access_is_direct(mr, is_write)) { | |
2223 | if (bounce.buffer) { | |
2224 | return NULL; | |
6d16c2f8 | 2225 | } |
e85d9db5 KW |
2226 | /* Avoid unbounded allocations */ |
2227 | l = MIN(l, TARGET_PAGE_SIZE); | |
2228 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); | |
e3127ae0 PB |
2229 | bounce.addr = addr; |
2230 | bounce.len = l; | |
d3e71559 PB |
2231 | |
2232 | memory_region_ref(mr); | |
2233 | bounce.mr = mr; | |
e3127ae0 PB |
2234 | if (!is_write) { |
2235 | address_space_read(as, addr, bounce.buffer, l); | |
8ab934f9 | 2236 | } |
6d16c2f8 | 2237 | |
e3127ae0 PB |
2238 | *plen = l; |
2239 | return bounce.buffer; | |
2240 | } | |
2241 | ||
2242 | base = xlat; | |
2243 | raddr = memory_region_get_ram_addr(mr); | |
2244 | ||
2245 | for (;;) { | |
6d16c2f8 AL |
2246 | len -= l; |
2247 | addr += l; | |
e3127ae0 PB |
2248 | done += l; |
2249 | if (len == 0) { | |
2250 | break; | |
2251 | } | |
2252 | ||
2253 | l = len; | |
2254 | this_mr = address_space_translate(as, addr, &xlat, &l, is_write); | |
2255 | if (this_mr != mr || xlat != base + done) { | |
2256 | break; | |
2257 | } | |
6d16c2f8 | 2258 | } |
e3127ae0 | 2259 | |
d3e71559 | 2260 | memory_region_ref(mr); |
e3127ae0 PB |
2261 | *plen = done; |
2262 | return qemu_ram_ptr_length(raddr + base, plen); | |
6d16c2f8 AL |
2263 | } |
2264 | ||
ac1970fb | 2265 | /* Unmaps a memory region previously mapped by address_space_map(). |
6d16c2f8 AL |
2266 | * Will also mark the memory as dirty if is_write == 1. access_len gives |
2267 | * the amount of memory that was actually read or written by the caller. | |
2268 | */ | |
a8170e5e AK |
2269 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
2270 | int is_write, hwaddr access_len) | |
6d16c2f8 AL |
2271 | { |
2272 | if (buffer != bounce.buffer) { | |
d3e71559 PB |
2273 | MemoryRegion *mr; |
2274 | ram_addr_t addr1; | |
2275 | ||
2276 | mr = qemu_ram_addr_from_host(buffer, &addr1); | |
2277 | assert(mr != NULL); | |
6d16c2f8 | 2278 | if (is_write) { |
6d16c2f8 AL |
2279 | while (access_len) { |
2280 | unsigned l; | |
2281 | l = TARGET_PAGE_SIZE; | |
2282 | if (l > access_len) | |
2283 | l = access_len; | |
51d7a9eb | 2284 | invalidate_and_set_dirty(addr1, l); |
6d16c2f8 AL |
2285 | addr1 += l; |
2286 | access_len -= l; | |
2287 | } | |
2288 | } | |
868bb33f | 2289 | if (xen_enabled()) { |
e41d7c69 | 2290 | xen_invalidate_map_cache_entry(buffer); |
050a0ddf | 2291 | } |
d3e71559 | 2292 | memory_region_unref(mr); |
6d16c2f8 AL |
2293 | return; |
2294 | } | |
2295 | if (is_write) { | |
ac1970fb | 2296 | address_space_write(as, bounce.addr, bounce.buffer, access_len); |
6d16c2f8 | 2297 | } |
f8a83245 | 2298 | qemu_vfree(bounce.buffer); |
6d16c2f8 | 2299 | bounce.buffer = NULL; |
d3e71559 | 2300 | memory_region_unref(bounce.mr); |
ba223c29 | 2301 | cpu_notify_map_clients(); |
6d16c2f8 | 2302 | } |
d0ecd2aa | 2303 | |
a8170e5e AK |
2304 | void *cpu_physical_memory_map(hwaddr addr, |
2305 | hwaddr *plen, | |
ac1970fb AK |
2306 | int is_write) |
2307 | { | |
2308 | return address_space_map(&address_space_memory, addr, plen, is_write); | |
2309 | } | |
2310 | ||
a8170e5e AK |
2311 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
2312 | int is_write, hwaddr access_len) | |
ac1970fb AK |
2313 | { |
2314 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); | |
2315 | } | |
2316 | ||
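/* Hedged usage sketch (editor's addition) of the map/unmap protocol
 * above: map, access, then unmap with the length actually used, and
 * fall back to the copying path when the single bounce buffer is busy.
 * A real caller would loop while len < size; all names are
 * hypothetical. */
static void example_dma_write(hwaddr gpa, const uint8_t *data, hwaddr size)
{
    hwaddr len = size;
    void *p = cpu_physical_memory_map(gpa, &len, 1 /* is_write */);
    if (p) {
        memcpy(p, data, len);                      /* len may have shrunk */
        cpu_physical_memory_unmap(p, len, 1, len); /* marks pages dirty */
    } else {
        cpu_physical_memory_write(gpa, data, size); /* bounce buffer busy */
    }
}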
8df1cd07 | 2317 | /* warning: addr must be aligned */ |
a8170e5e | 2318 | static inline uint32_t ldl_phys_internal(hwaddr addr, |
1e78bcc1 | 2319 | enum device_endian endian) |
8df1cd07 | 2320 | { |
8df1cd07 | 2321 | uint8_t *ptr; |
791af8c8 | 2322 | uint64_t val; |
5c8a00ce | 2323 | MemoryRegion *mr; |
149f54b5 PB |
2324 | hwaddr l = 4; |
2325 | hwaddr addr1; | |
8df1cd07 | 2326 | |
5c8a00ce PB |
2327 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2328 | false); | |
2329 | if (l < 4 || !memory_access_is_direct(mr, false)) { | |
8df1cd07 | 2330 | /* I/O case */ |
5c8a00ce | 2331 | io_mem_read(mr, addr1, &val, 4); |
1e78bcc1 AG |
2332 | #if defined(TARGET_WORDS_BIGENDIAN) |
2333 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2334 | val = bswap32(val); | |
2335 | } | |
2336 | #else | |
2337 | if (endian == DEVICE_BIG_ENDIAN) { | |
2338 | val = bswap32(val); | |
2339 | } | |
2340 | #endif | |
8df1cd07 FB |
2341 | } else { |
2342 | /* RAM case */ | |
5c8a00ce | 2343 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr) |
06ef3525 | 2344 | & TARGET_PAGE_MASK) |
149f54b5 | 2345 | + addr1); |
1e78bcc1 AG |
2346 | switch (endian) { |
2347 | case DEVICE_LITTLE_ENDIAN: | |
2348 | val = ldl_le_p(ptr); | |
2349 | break; | |
2350 | case DEVICE_BIG_ENDIAN: | |
2351 | val = ldl_be_p(ptr); | |
2352 | break; | |
2353 | default: | |
2354 | val = ldl_p(ptr); | |
2355 | break; | |
2356 | } | |
8df1cd07 FB |
2357 | } |
2358 | return val; | |
2359 | } | |
2360 | ||
a8170e5e | 2361 | uint32_t ldl_phys(hwaddr addr) |
1e78bcc1 AG |
2362 | { |
2363 | return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2364 | } | |
2365 | ||
a8170e5e | 2366 | uint32_t ldl_le_phys(hwaddr addr) |
1e78bcc1 AG |
2367 | { |
2368 | return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2369 | } | |
2370 | ||
a8170e5e | 2371 | uint32_t ldl_be_phys(hwaddr addr) |
1e78bcc1 AG |
2372 | { |
2373 | return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2374 | } | |
2375 | ||
84b7b8e7 | 2376 | /* warning: addr must be aligned */ |
a8170e5e | 2377 | static inline uint64_t ldq_phys_internal(hwaddr addr, |
1e78bcc1 | 2378 | enum device_endian endian) |
84b7b8e7 | 2379 | { |
84b7b8e7 FB |
2380 | uint8_t *ptr; |
2381 | uint64_t val; | |
5c8a00ce | 2382 | MemoryRegion *mr; |
149f54b5 PB |
2383 | hwaddr l = 8; |
2384 | hwaddr addr1; | |
84b7b8e7 | 2385 | |
5c8a00ce PB |
2386 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2387 | false); | |
2388 | if (l < 8 || !memory_access_is_direct(mr, false)) { | |
84b7b8e7 | 2389 | /* I/O case */ |
5c8a00ce | 2390 | io_mem_read(mr, addr1, &val, 8); |
968a5627 PB |
2391 | #if defined(TARGET_WORDS_BIGENDIAN) |
2392 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2393 | val = bswap64(val); | |
2394 | } | |
2395 | #else | |
2396 | if (endian == DEVICE_BIG_ENDIAN) { | |
2397 | val = bswap64(val); | |
2398 | } | |
84b7b8e7 FB |
2399 | #endif |
2400 | } else { | |
2401 | /* RAM case */ | |
5c8a00ce | 2402 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr) |
06ef3525 | 2403 | & TARGET_PAGE_MASK) |
149f54b5 | 2404 | + addr1); |
1e78bcc1 AG |
2405 | switch (endian) { |
2406 | case DEVICE_LITTLE_ENDIAN: | |
2407 | val = ldq_le_p(ptr); | |
2408 | break; | |
2409 | case DEVICE_BIG_ENDIAN: | |
2410 | val = ldq_be_p(ptr); | |
2411 | break; | |
2412 | default: | |
2413 | val = ldq_p(ptr); | |
2414 | break; | |
2415 | } | |
84b7b8e7 FB |
2416 | } |
2417 | return val; | |
2418 | } | |
2419 | ||
a8170e5e | 2420 | uint64_t ldq_phys(hwaddr addr) |
1e78bcc1 AG |
2421 | { |
2422 | return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2423 | } | |
2424 | ||
a8170e5e | 2425 | uint64_t ldq_le_phys(hwaddr addr) |
1e78bcc1 AG |
2426 | { |
2427 | return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2428 | } | |
2429 | ||
a8170e5e | 2430 | uint64_t ldq_be_phys(hwaddr addr) |
1e78bcc1 AG |
2431 | { |
2432 | return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2433 | } | |
2434 | ||
aab33094 | 2435 | /* XXX: optimize */ |
a8170e5e | 2436 | uint32_t ldub_phys(hwaddr addr) |
aab33094 FB |
2437 | { |
2438 | uint8_t val; | |
2439 | cpu_physical_memory_read(addr, &val, 1); | |
2440 | return val; | |
2441 | } | |
2442 | ||
733f0b02 | 2443 | /* warning: addr must be aligned */ |
a8170e5e | 2444 | static inline uint32_t lduw_phys_internal(hwaddr addr, |
1e78bcc1 | 2445 | enum device_endian endian) |
aab33094 | 2446 | { |
733f0b02 MT |
2447 | uint8_t *ptr; |
2448 | uint64_t val; | |
5c8a00ce | 2449 | MemoryRegion *mr; |
149f54b5 PB |
2450 | hwaddr l = 2; |
2451 | hwaddr addr1; | |
733f0b02 | 2452 | |
5c8a00ce PB |
2453 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2454 | false); | |
2455 | if (l < 2 || !memory_access_is_direct(mr, false)) { | |
733f0b02 | 2456 | /* I/O case */ |
5c8a00ce | 2457 | io_mem_read(mr, addr1, &val, 2); |
1e78bcc1 AG |
2458 | #if defined(TARGET_WORDS_BIGENDIAN) |
2459 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2460 | val = bswap16(val); | |
2461 | } | |
2462 | #else | |
2463 | if (endian == DEVICE_BIG_ENDIAN) { | |
2464 | val = bswap16(val); | |
2465 | } | |
2466 | #endif | |
733f0b02 MT |
2467 | } else { |
2468 | /* RAM case */ | |
5c8a00ce | 2469 | ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr) |
06ef3525 | 2470 | & TARGET_PAGE_MASK) |
149f54b5 | 2471 | + addr1); |
1e78bcc1 AG |
2472 | switch (endian) { |
2473 | case DEVICE_LITTLE_ENDIAN: | |
2474 | val = lduw_le_p(ptr); | |
2475 | break; | |
2476 | case DEVICE_BIG_ENDIAN: | |
2477 | val = lduw_be_p(ptr); | |
2478 | break; | |
2479 | default: | |
2480 | val = lduw_p(ptr); | |
2481 | break; | |
2482 | } | |
733f0b02 MT |
2483 | } |
2484 | return val; | |
aab33094 FB |
2485 | } |
2486 | ||
a8170e5e | 2487 | uint32_t lduw_phys(hwaddr addr) |
1e78bcc1 AG |
2488 | { |
2489 | return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN); | |
2490 | } | |
2491 | ||
a8170e5e | 2492 | uint32_t lduw_le_phys(hwaddr addr) |
1e78bcc1 AG |
2493 | { |
2494 | return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN); | |
2495 | } | |
2496 | ||
a8170e5e | 2497 | uint32_t lduw_be_phys(hwaddr addr) |
1e78bcc1 AG |
2498 | { |
2499 | return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN); | |
2500 | } | |
2501 | ||
8df1cd07 FB |
2502 | /* warning: addr must be aligned. The ram page is not marked as dirty
2503 | and the code inside is not invalidated. It is useful if the dirty | |
2504 | bits are used to track modified PTEs */ | |
a8170e5e | 2505 | void stl_phys_notdirty(hwaddr addr, uint32_t val) |
8df1cd07 | 2506 | { |
8df1cd07 | 2507 | uint8_t *ptr; |
5c8a00ce | 2508 | MemoryRegion *mr; |
149f54b5 PB |
2509 | hwaddr l = 4; |
2510 | hwaddr addr1; | |
8df1cd07 | 2511 | |
5c8a00ce PB |
2512 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2513 | true); | |
2514 | if (l < 4 || !memory_access_is_direct(mr, true)) { | |
2515 | io_mem_write(mr, addr1, val, 4); | |
8df1cd07 | 2516 | } else { |
5c8a00ce | 2517 | addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; |
5579c7f3 | 2518 | ptr = qemu_get_ram_ptr(addr1); |
8df1cd07 | 2519 | stl_p(ptr, val); |
74576198 AL |
2520 | |
2521 | if (unlikely(in_migration)) { | |
2522 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2523 | /* invalidate code */ | |
2524 | tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | |
2525 | /* set dirty bit */ | |
f7c11b53 YT |
2526 | cpu_physical_memory_set_dirty_flags( |
2527 | addr1, (0xff & ~CODE_DIRTY_FLAG)); | |
74576198 AL |
2528 | } |
2529 | } | |
8df1cd07 FB |
2530 | } |
2531 | } | |
2532 | ||
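/* Hedged sketch (editor's addition): a target MMU walker setting the
 * accessed bit in a guest PTE. The _notdirty variant avoids
 * re-invalidating translated code on the PTE's page; EXAMPLE_PTE_A is a
 * hypothetical bit. */
#define EXAMPLE_PTE_A 0x20
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    if (!(pte & EXAMPLE_PTE_A)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_A);
    }
}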
2533 | /* warning: addr must be aligned */ | |
a8170e5e | 2534 | static inline void stl_phys_internal(hwaddr addr, uint32_t val, |
1e78bcc1 | 2535 | enum device_endian endian) |
8df1cd07 | 2536 | { |
8df1cd07 | 2537 | uint8_t *ptr; |
5c8a00ce | 2538 | MemoryRegion *mr; |
149f54b5 PB |
2539 | hwaddr l = 4; |
2540 | hwaddr addr1; | |
8df1cd07 | 2541 | |
5c8a00ce PB |
2542 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2543 | true); | |
2544 | if (l < 4 || !memory_access_is_direct(mr, true)) { | |
1e78bcc1 AG |
2545 | #if defined(TARGET_WORDS_BIGENDIAN) |
2546 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2547 | val = bswap32(val); | |
2548 | } | |
2549 | #else | |
2550 | if (endian == DEVICE_BIG_ENDIAN) { | |
2551 | val = bswap32(val); | |
2552 | } | |
2553 | #endif | |
5c8a00ce | 2554 | io_mem_write(mr, addr1, val, 4); |
8df1cd07 | 2555 | } else { |
8df1cd07 | 2556 | /* RAM case */ |
5c8a00ce | 2557 | addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; |
5579c7f3 | 2558 | ptr = qemu_get_ram_ptr(addr1); |
1e78bcc1 AG |
2559 | switch (endian) { |
2560 | case DEVICE_LITTLE_ENDIAN: | |
2561 | stl_le_p(ptr, val); | |
2562 | break; | |
2563 | case DEVICE_BIG_ENDIAN: | |
2564 | stl_be_p(ptr, val); | |
2565 | break; | |
2566 | default: | |
2567 | stl_p(ptr, val); | |
2568 | break; | |
2569 | } | |
51d7a9eb | 2570 | invalidate_and_set_dirty(addr1, 4); |
8df1cd07 FB |
2571 | } |
2572 | } | |
2573 | ||
a8170e5e | 2574 | void stl_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2575 | { |
2576 | stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); | |
2577 | } | |
2578 | ||
a8170e5e | 2579 | void stl_le_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2580 | { |
2581 | stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); | |
2582 | } | |
2583 | ||
a8170e5e | 2584 | void stl_be_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2585 | { |
2586 | stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN); | |
2587 | } | |
2588 | ||
aab33094 | 2589 | /* XXX: optimize */ |
a8170e5e | 2590 | void stb_phys(hwaddr addr, uint32_t val) |
aab33094 FB |
2591 | { |
2592 | uint8_t v = val; | |
2593 | cpu_physical_memory_write(addr, &v, 1); | |
2594 | } | |
2595 | ||
733f0b02 | 2596 | /* warning: addr must be aligned */ |
a8170e5e | 2597 | static inline void stw_phys_internal(hwaddr addr, uint32_t val, |
1e78bcc1 | 2598 | enum device_endian endian) |
aab33094 | 2599 | { |
733f0b02 | 2600 | uint8_t *ptr; |
5c8a00ce | 2601 | MemoryRegion *mr; |
149f54b5 PB |
2602 | hwaddr l = 2; |
2603 | hwaddr addr1; | |
733f0b02 | 2604 | |
5c8a00ce PB |
2605 | mr = address_space_translate(&address_space_memory, addr, &addr1, &l, |
2606 | true); | |
2607 | if (l < 2 || !memory_access_is_direct(mr, true)) { | |
1e78bcc1 AG |
2608 | #if defined(TARGET_WORDS_BIGENDIAN) |
2609 | if (endian == DEVICE_LITTLE_ENDIAN) { | |
2610 | val = bswap16(val); | |
2611 | } | |
2612 | #else | |
2613 | if (endian == DEVICE_BIG_ENDIAN) { | |
2614 | val = bswap16(val); | |
2615 | } | |
2616 | #endif | |
5c8a00ce | 2617 | io_mem_write(mr, addr1, val, 2); |
733f0b02 | 2618 | } else { |
733f0b02 | 2619 | /* RAM case */ |
5c8a00ce | 2620 | addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK; |
733f0b02 | 2621 | ptr = qemu_get_ram_ptr(addr1); |
1e78bcc1 AG |
2622 | switch (endian) { |
2623 | case DEVICE_LITTLE_ENDIAN: | |
2624 | stw_le_p(ptr, val); | |
2625 | break; | |
2626 | case DEVICE_BIG_ENDIAN: | |
2627 | stw_be_p(ptr, val); | |
2628 | break; | |
2629 | default: | |
2630 | stw_p(ptr, val); | |
2631 | break; | |
2632 | } | |
51d7a9eb | 2633 | invalidate_and_set_dirty(addr1, 2); |
733f0b02 | 2634 | } |
aab33094 FB |
2635 | } |
2636 | ||
a8170e5e | 2637 | void stw_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2638 | { |
2639 | stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN); | |
2640 | } | |
2641 | ||
a8170e5e | 2642 | void stw_le_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2643 | { |
2644 | stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN); | |
2645 | } | |
2646 | ||
a8170e5e | 2647 | void stw_be_phys(hwaddr addr, uint32_t val) |
1e78bcc1 AG |
2648 | { |
2649 | stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN); | |
2650 | } | |
2651 | ||
aab33094 | 2652 | /* XXX: optimize */ |
a8170e5e | 2653 | void stq_phys(hwaddr addr, uint64_t val) |
aab33094 FB |
2654 | { |
2655 | val = tswap64(val); | |
71d2b725 | 2656 | cpu_physical_memory_write(addr, &val, 8); |
aab33094 FB |
2657 | } |
2658 | ||
a8170e5e | 2659 | void stq_le_phys(hwaddr addr, uint64_t val) |
1e78bcc1 AG |
2660 | { |
2661 | val = cpu_to_le64(val); | |
2662 | cpu_physical_memory_write(addr, &val, 8); | |
2663 | } | |
2664 | ||
a8170e5e | 2665 | void stq_be_phys(hwaddr addr, uint64_t val) |
1e78bcc1 AG |
2666 | { |
2667 | val = cpu_to_be64(val); | |
2668 | cpu_physical_memory_write(addr, &val, 8); | |
2669 | } | |
2670 | ||
5e2972fd | 2671 | /* virtual memory access for debug (includes writing to ROM) */ |
f17ec444 | 2672 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
b448f2f3 | 2673 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
2674 | { |
2675 | int l; | |
a8170e5e | 2676 | hwaddr phys_addr; |
9b3c35e0 | 2677 | target_ulong page; |
13eb76e0 FB |
2678 | |
2679 | while (len > 0) { | |
2680 | page = addr & TARGET_PAGE_MASK; | |
f17ec444 | 2681 | phys_addr = cpu_get_phys_page_debug(cpu, page); |
13eb76e0 FB |
2682 | /* if no physical page mapped, return an error */ |
2683 | if (phys_addr == -1) | |
2684 | return -1; | |
2685 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2686 | if (l > len) | |
2687 | l = len; | |
5e2972fd | 2688 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
5e2972fd AL |
2689 | if (is_write) |
2690 | cpu_physical_memory_write_rom(phys_addr, buf, l); | |
2691 | else | |
5e2972fd | 2692 | cpu_physical_memory_rw(phys_addr, buf, l, is_write); |
13eb76e0 FB |
2693 | len -= l; |
2694 | buf += l; | |
2695 | addr += l; | |
2696 | } | |
2697 | return 0; | |
2698 | } | |
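/* Hedged usage sketch (editor's addition): a gdbstub-style reader that
 * tolerates unmapped pages; a non-zero return from the function above
 * means some page in the range had no physical mapping. */
static bool example_debug_peek(CPUState *cpu, target_ulong vaddr,
                               uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0) == 0;
}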
a68fe89c | 2699 | #endif |
13eb76e0 | 2700 | |
8e4a424b BS |
2701 | #if !defined(CONFIG_USER_ONLY) |
2702 | ||
2703 | /* | |
2704 | * A helper function for the _utterly broken_ virtio device model to find out if | |
2705 | * it's running on a big endian machine. Don't do this at home kids! | |
2706 | */ | |
2707 | bool virtio_is_big_endian(void); | |
2708 | bool virtio_is_big_endian(void) | |
2709 | { | |
2710 | #if defined(TARGET_WORDS_BIGENDIAN) | |
2711 | return true; | |
2712 | #else | |
2713 | return false; | |
2714 | #endif | |
2715 | } | |
2716 | ||
2717 | #endif | |
2718 | ||
76f35538 | 2719 | #ifndef CONFIG_USER_ONLY |
a8170e5e | 2720 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
76f35538 | 2721 | { |
5c8a00ce | 2722 | MemoryRegion *mr; |
149f54b5 | 2723 | hwaddr l = 1; |
76f35538 | 2724 | |
5c8a00ce PB |
2725 | mr = address_space_translate(&address_space_memory, |
2726 | phys_addr, &phys_addr, &l, false); | |
76f35538 | 2727 | |
5c8a00ce PB |
2728 | return !(memory_region_is_ram(mr) || |
2729 | memory_region_is_romd(mr)); | |
76f35538 | 2730 | } |
bd2fa51f MH |
2731 | |
2732 | void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) | |
2733 | { | |
2734 | RAMBlock *block; | |
2735 | ||
2736 | QTAILQ_FOREACH(block, &ram_list.blocks, next) { | |
2737 | func(block->host, block->offset, block->length, opaque); | |
2738 | } | |
2739 | } | |
ec3f8c99 | 2740 | #endif |