/*
 * RAM allocation and memory access
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"

#include "qemu/cutils.h"
#include "qemu/cacheflush.h"
#include "cpu.h"

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */

#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "hw/xen/xen.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/hostmem.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace/trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/pmem.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

#ifdef CONFIG_LIBDAXCTL
#include <daxctl/libdaxctl.h>
#endif

//#define DEBUG_SUBPAGE

/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

static MemoryRegion io_mem_unassigned;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
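
/*
 * Worked example of the level arithmetic: with ADDR_SPACE_BITS = 64 and a
 * hypothetical TARGET_PAGE_BITS of 12, the page-number space is
 * 64 - 12 = 52 bits wide, so P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 * levels of 512-entry tables (6 * 9 = 54 bits >= 52 bits of page number).
 */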

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    FlatView *fv;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(alloc_hint, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, uint64_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, uint64_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
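
/*
 * Usage sketch with hypothetical values: mapping a 2 MiB section starting
 * at guest-physical address 0x40000000 with 4 KiB pages would be
 *
 *     phys_page_set(d, 0x40000000 >> TARGET_PAGE_BITS,
 *                   0x200000 >> TARGET_PAGE_BITS, section_index);
 *
 * i.e. page numbers [0x40000, 0x40200) all resolve to the same leaf.
 */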

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (P_L2_LEVELS >= (1 << 6) &&
        lp->skip + p[valid_ptr].skip >= (1 << 6)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
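
/*
 * Example: a section with offset_within_address_space = 0x1000 and
 * size = 0x2000 covers addresses [0x1000, 0x3000), so
 * section_covers_addr(section, 0x2fff) is true while
 * section_covers_addr(section, 0x3000) is false. A section whose Int128
 * size has a non-zero high word necessarily spans the whole 64-bit space,
 * hence the int128_gethi() short-circuit.
 */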

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
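
/*
 * Walk sketch (assuming P_L2_BITS == 9 and 4 KiB pages): for
 * addr = 0x40201000, index = 0x40201, and each iteration selects one of
 * the 512 entries using bits [i*9+8 : i*9] of index. A compacted entry
 * with skip > 1 jumps several levels in one step, which is why the loop
 * decrements i by lp.skip rather than by one.
 */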

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = qatomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        qatomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/**
 * address_space_translate_iommu - translate an address through an IOMMU
 * memory region and then through the target address space.
 *
 * @iommu_mr: the IOMMU memory region that we start the translation from
 * @addr: the address to be translated through the MMU
 * @xlat: the translated address offset within the destination memory region.
 *        It cannot be %NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            cannot be %NULL.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be %NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: transaction attributes
 *
 * This function is called from RCU critical section. It is the common
 * part of flatview_do_translate and address_space_translate_cached.
 */
static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
                                                         hwaddr *xlat,
                                                         hwaddr *plen_out,
                                                         hwaddr *page_mask_out,
                                                         bool is_write,
                                                         bool is_mmio,
                                                         AddressSpace **target_as,
                                                         MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    hwaddr page_mask = (hwaddr)-1;

    do {
        hwaddr addr = *xlat;
        IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
        int iommu_idx = 0;
        IOMMUTLBEntry iotlb;

        if (imrc->attrs_to_index) {
            iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        }

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO, iommu_idx);

        if (!(iotlb.perm & (1 << is_write))) {
            goto unassigned;
        }

        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
        *target_as = iotlb.target_as;

        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
                plen_out, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (unlikely(iommu_mr));

    if (page_mask_out) {
        *page_mask_out = page_mask;
    }
    return *section;

unassigned:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}

/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region. It
 *        cannot be @NULL.
 * @plen_out: valid read/write length of the translated address. It
 *            can be @NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be @NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 * @target_as: the address space targeted by the IOMMU
 * @attrs: memory transaction attributes
 *
 * This function is called from RCU critical section
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr addr,
                                                 hwaddr *xlat,
                                                 hwaddr *plen_out,
                                                 hwaddr *page_mask_out,
                                                 bool is_write,
                                                 bool is_mmio,
                                                 AddressSpace **target_as,
                                                 MemTxAttrs attrs)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr plen = (hwaddr)(-1);

    if (!plen_out) {
        plen_out = &plen;
    }

    section = address_space_translate_internal(
            flatview_to_dispatch(fv), addr, xlat,
            plen_out, is_mmio);

    iommu_mr = memory_region_get_iommu(section->mr);
    if (unlikely(iommu_mr)) {
        return address_space_translate_iommu(iommu_mr, xlat,
                                             plen_out, page_mask_out,
                                             is_write, is_mmio,
                                             target_as, attrs);
    }
    if (page_mask_out) {
        /* Not behind an IOMMU, use default page size. */
        *page_mask_out = ~TARGET_PAGE_MASK;
    }

    return *section;
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     * but page mask.
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as,
                                    attrs);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
            section.offset_within_region;

    return (IOMMUTLBEntry) {
        .target_as = as,
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMAs, and DMA is only allowed on RAM. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}

/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write,
                                 MemTxAttrs attrs)
{
    MemoryRegion *mr;
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so setup MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as, attrs);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}
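
/*
 * Caller sketch (illustrative): translation results are only stable inside
 * an RCU critical section, so a typical access path looks like
 *
 *     RCU_READ_LOCK_GUARD();
 *     mr = flatview_translate(fv, addr, &xlat, &len, is_write, attrs);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         ... fast path: access host RAM directly ...
 *     } else {
 *         ... slow path: MMIO dispatch ...
 *     }
 *
 * Holding the guard keeps the FlatView and its sections alive.
 */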

typedef struct TCGIOMMUNotifier {
    IOMMUNotifier n;
    MemoryRegion *mr;
    CPUState *cpu;
    int iommu_idx;
    bool active;
} TCGIOMMUNotifier;

static void tcg_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    TCGIOMMUNotifier *notifier = container_of(n, TCGIOMMUNotifier, n);

    if (!notifier->active) {
        return;
    }
    tlb_flush(notifier->cpu);
    notifier->active = false;
    /* We leave the notifier struct on the list to avoid reallocating it later.
     * Generally the number of IOMMUs a CPU deals with will be small.
     * In any case we can't unregister the iommu notifier from a notify
     * callback.
     */
}

static void tcg_register_iommu_notifier(CPUState *cpu,
                                        IOMMUMemoryRegion *iommu_mr,
                                        int iommu_idx)
{
    /* Make sure this CPU has an IOMMU notifier registered for this
     * IOMMU/IOMMU index combination, so that we can flush its TLB
     * when the IOMMU tells us the mappings we've cached have changed.
     */
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    TCGIOMMUNotifier *notifier = NULL;
    int i;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        if (notifier->mr == mr && notifier->iommu_idx == iommu_idx) {
            break;
        }
    }
    if (i == cpu->iommu_notifiers->len) {
        /* Not found, add a new entry at the end of the array */
        cpu->iommu_notifiers = g_array_set_size(cpu->iommu_notifiers, i + 1);
        notifier = g_new0(TCGIOMMUNotifier, 1);
        g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i) = notifier;

        notifier->mr = mr;
        notifier->iommu_idx = iommu_idx;
        notifier->cpu = cpu;
        /* Rather than trying to register interest in the specific part
         * of the iommu's address space that we've accessed and then
         * expand it later as subsequent accesses touch more of it, we
         * just register interest in the whole thing, on the assumption
         * that iommu reconfiguration will be rare.
         */
        iommu_notifier_init(&notifier->n,
                            tcg_iommu_unmap_notify,
                            IOMMU_NOTIFIER_UNMAP,
                            0,
                            HWADDR_MAX,
                            iommu_idx);
        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
                                              &error_fatal);
    }

    if (!notifier->active) {
        notifier->active = true;
    }
}

void tcg_iommu_free_notifier_list(CPUState *cpu)
{
    /* Destroy the CPU's notifier list */
    int i;
    TCGIOMMUNotifier *notifier;

    for (i = 0; i < cpu->iommu_notifiers->len; i++) {
        notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
        memory_region_unregister_iommu_notifier(notifier->mr, &notifier->n);
        g_free(notifier);
    }
    g_array_free(cpu->iommu_notifiers, true);
}

void tcg_iommu_init_notifier_list(CPUState *cpu)
{
    cpu->iommu_notifiers = g_array_new(false, true, sizeof(TCGIOMMUNotifier *));
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot)
{
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;
    IOMMUTLBEntry iotlb;
    int iommu_idx;
    AddressSpaceDispatch *d =
        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    for (;;) {
        section = address_space_translate_internal(d, addr, &addr, plen, false);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }

        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
        tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
        /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
         * doesn't short-cut its translation table walk.
         */
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        /* Update the caller's prot bits to remove permissions the IOMMU
         * is giving us a failure response for. If we get down to no
         * permissions left at all we can give up now.
         */
        if (!(iotlb.perm & IOMMU_RO)) {
            *prot &= ~(PAGE_READ | PAGE_EXEC);
        }
        if (!(iotlb.perm & IOMMU_WO)) {
            *prot &= ~PAGE_WRITE;
        }

        if (!*prot) {
            goto translate_fail;
        }

        d = flatview_to_dispatch(address_space_to_flatview(iotlb.target_as));
    }

    assert(!memory_region_is_iommu(section->mr));
    *xlat = addr;
    return section;

translate_fail:
    return &d->map.sections[PHYS_SECTION_UNASSIGNED];
}

void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);
    char *as_name;

    assert(mr);
    as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
    address_space_init(as, mr, as_name);
    g_free(as_name);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
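
/*
 * Usage sketch (hypothetical target): a CPU with two address spaces, e.g.
 * a secure/non-secure split, sets cpu->num_ases = 2 during realize and then
 *
 *     cpu_address_space_init(cs, 0, "cpu-memory", board_memory_mr);
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_memory_mr);
 *
 * where the MemoryRegion arguments are whatever roots the board wired up.
 */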

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;
    vaddr in_page;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    in_page = -(addr | TARGET_PAGE_MASK);
    if (len <= in_page) {
        tlb_flush_page(cpu, addr);
    } else {
        tlb_flush(cpu);
    }

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
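
/*
 * Usage sketch (hypothetical gdbstub-style caller): watch 4 bytes at vaddr
 * for writes and keep the handle for later removal:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(cpu, vaddr, 4, BP_GDB | BP_MEM_WRITE, &wp)) {
 *         ... report failure ...
 *     }
 *     ...
 *     cpu_watchpoint_remove_by_ref(cpu, wp);
 */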

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

#ifdef CONFIG_TCG
/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
                                              vaddr addr, vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
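
/*
 * Worked example of the wrap-around case: with 64-bit vaddr, a watchpoint
 * at 0xfffffffffffff000 with len 0x1000 ends exactly at the top of the
 * address space, so wp->vaddr + wp->len wraps to 0. Comparing inclusive
 * ends (wpend = 0xffffffffffffffff) keeps the overlap test correct.
 */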

/* Return flags for watchpoints that match addr + prot. */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
    CPUWatchpoint *wp;
    int ret = 0;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (watchpoint_address_matches(wp, addr, len)) {
            ret |= wp->flags;
        }
    }
    return ret;
}

/* Generate a debug exception if a watchpoint has been hit. */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    assert(tcg_enabled());
    if (cpu->watchpoint_hit) {
        /*
         * We re-entered the check after replacing the TB.
         * Now raise the debug interrupt so that it will
         * trigger after the current instruction.
         */
        qemu_mutex_lock_iothread();
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        qemu_mutex_unlock_iothread();
        return;
    }

    if (cc->tcg_ops->adjust_watchpoint_address) {
        /* this is currently used only by ARM BE32 */
        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
    }
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (watchpoint_address_matches(wp, addr, len)
            && (wp->flags & flags)) {
            if (replay_running_debug()) {
                /*
                 * Don't process the watchpoints when we are
                 * in a reverse debugging operation.
                 */
                replay_breakpoint();
                return;
            }
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = MAX(addr, wp->vaddr);
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint &&
                    !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                mmap_lock();
                tb_check_watchpoint(cpu, ra);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    mmap_unlock();
                    cpu_loop_exit_restore(cpu, ra);
                } else {
                    /* Force execution of one insn next time. */
                    cpu->cflags_next_tb = 1 | curr_cflags();
                    mmap_unlock();
                    if (ra) {
                        cpu_restore_state(cpu, ra, true);
                    }
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

#endif /* CONFIG_TCG */

/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = qatomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * qatomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    assert(tcg_enabled());
    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    RCU_READ_LOCK_GUARD();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page, start_page;
    bool dirty = false;
    RAMBlock *ramblock;
    uint64_t mr_offset, mr_size;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    start_page = start >> TARGET_PAGE_BITS;
    page = start_page;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
        ramblock = qemu_get_ram_block(start);
        /* Range sanity check on the ramblock */
        assert(start >= ramblock->offset &&
               start + length <= ramblock->offset + ramblock->used_length);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - offset);

            dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                                  offset, num);
            page += num;
        }

        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
        mr_size = (end - start_page) << TARGET_PAGE_BITS;
        memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
    }

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
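
/*
 * Indexing sketch: the dirty bitmap is sharded into fixed-size blocks of
 * DIRTY_MEMORY_BLOCK_SIZE pages, so dirty page number P lives in
 * blocks->blocks[P / DIRTY_MEMORY_BLOCK_SIZE] at bit
 * P % DIRTY_MEMORY_BLOCK_SIZE. Each iteration of the loop above therefore
 * clears at most one block's worth of bits before recomputing idx/offset.
 */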

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end   = last;

    page = first >> TARGET_PAGE_BITS;
    end  = last  >> TARGET_PAGE_BITS;
    dest = 0;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        while (page < end) {
            unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
            unsigned long num = MIN(end - page,
                                    DIRTY_MEMORY_BLOCK_SIZE - offset);

            assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
            assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
            offset >>= BITS_PER_LEVEL;

            bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                         blocks->blocks[idx] + offset,
                                         num);
            page += num;
            dest += num >> BITS_PER_LEVEL;
        }
    }

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    memory_region_clear_dirty_bitmap(mr, offset, length);

    return snap;
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}
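
/*
 * Usage sketch (illustrative): a display device can snapshot its VRAM's
 * dirty state once per frame and then query sub-ranges race-free:
 *
 *     snap = cpu_physical_memory_snapshot_and_clear_dirty(mr, 0, size,
 *                                                         DIRTY_MEMORY_VGA);
 *     if (cpu_physical_memory_snapshot_get_dirty(snap, addr, stride)) {
 *         ... redraw this scanline ...
 *     }
 *     g_free(snap);
 */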

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(section->fv);
    return section - d->map.sections;
}

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.fv = fv;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

/*
 * The range in *section* may look like this:
 *
 *      |s|PPPPPPP|s|
 *
 * where s stands for subpage and P for page.
 */
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
    MemoryRegionSection remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    /* register first subpage */
    if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
                        - remain.offset_within_address_space;

        MemoryRegionSection now = remain;
        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register whole pages */
    if (int128_ge(remain.size, page_size)) {
        MemoryRegionSection now = remain;
        now.size = int128_and(now.size, int128_neg(page_size));
        register_multipage(fv, &now);
        if (int128_eq(remain.size, now.size)) {
            return;
        }
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
    }

    /* register last subpage */
    register_subpage(fv, &remain);
}
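
/*
 * Worked example (4 KiB pages): a section covering [0x0800, 0x3400) splits
 * into a head subpage [0x0800, 0x1000), two whole pages [0x1000, 0x3000),
 * and a tail subpage [0x3000, 0x3400): the |s|PPPPPPP|s| shape described
 * above.
 */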

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

void ram_block_dump(Monitor *mon)
{
    RAMBlock *block;
    char *psize;

    RCU_READ_LOCK_GUARD();
    monitor_printf(mon, "%24s %8s  %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
        g_free(psize);
    }
}

#ifdef __linux__
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them. Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_min_backend_pagesize(Object *obj, void *opaque)
{
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
            *hpsize_min = hpsize;
        }
    }

    return 0;
}

static int find_max_backend_pagesize(Object *obj, void *opaque)
{
    long *hpsize_max = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        long hpsize = host_memory_backend_pagesize(backend);

        if (host_memory_backend_is_mapped(backend) && (hpsize > *hpsize_max)) {
            *hpsize_max = hpsize;
        }
    }

    return 0;
}

/*
 * TODO: We assume right now that all mapped host memory backends are
 * used as RAM, however some might be used for different purposes.
 */
long qemu_minrampagesize(void)
{
    long hpsize = LONG_MAX;
    Object *memdev_root = object_resolve_path("/objects", NULL);

    object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
    return hpsize;
}

long qemu_maxrampagesize(void)
{
    long pagesize = 0;
    Object *memdev_root = object_resolve_path("/objects", NULL);

    object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
    return pagesize;
}
#else
long qemu_minrampagesize(void)
{
    return qemu_real_host_page_size;
}
long qemu_maxrampagesize(void)
{
    return qemu_real_host_page_size;
}
#endif
d5dbde46 | 1397 | #ifdef CONFIG_POSIX |
d6af99c9 HZ |
1398 | static int64_t get_file_size(int fd) |
1399 | { | |
72d41eb4 SH |
1400 | int64_t size; |
1401 | #if defined(__linux__) | |
1402 | struct stat st; | |
1403 | ||
1404 | if (fstat(fd, &st) < 0) { | |
1405 | return -errno; | |
1406 | } | |
1407 | ||
1408 | /* Special handling for devdax character devices */ | |
1409 | if (S_ISCHR(st.st_mode)) { | |
1410 | g_autofree char *subsystem_path = NULL; | |
1411 | g_autofree char *subsystem = NULL; | |
1412 | ||
1413 | subsystem_path = g_strdup_printf("/sys/dev/char/%d:%d/subsystem", | |
1414 | major(st.st_rdev), minor(st.st_rdev)); | |
1415 | subsystem = g_file_read_link(subsystem_path, NULL); | |
1416 | ||
1417 | if (subsystem && g_str_has_suffix(subsystem, "/dax")) { | |
1418 | g_autofree char *size_path = NULL; | |
1419 | g_autofree char *size_str = NULL; | |
1420 | ||
1421 | size_path = g_strdup_printf("/sys/dev/char/%d:%d/size", | |
1422 | major(st.st_rdev), minor(st.st_rdev)); | |
1423 | ||
1424 | if (g_file_get_contents(size_path, &size_str, NULL, NULL)) { | |
1425 | return g_ascii_strtoll(size_str, NULL, 0); | |
1426 | } | |
1427 | } | |
1428 | } | |
1429 | #endif /* defined(__linux__) */ | |
1430 | ||
1431 | /* st.st_size may be zero for special files, yet lseek(2) works */ | |
1432 | size = lseek(fd, 0, SEEK_END); | |
d6af99c9 HZ |
1433 | if (size < 0) { |
1434 | return -errno; | |
1435 | } | |
1436 | return size; | |
1437 | } | |
1438 | ||
ce317be9 JL |
1439 | static int64_t get_file_align(int fd) |
1440 | { | |
1441 | int64_t align = -1; | |
1442 | #if defined(__linux__) && defined(CONFIG_LIBDAXCTL) | |
1443 | struct stat st; | |
1444 | ||
1445 | if (fstat(fd, &st) < 0) { | |
1446 | return -errno; | |
1447 | } | |
1448 | ||
1449 | /* Special handling for devdax character devices */ | |
1450 | if (S_ISCHR(st.st_mode)) { | |
1451 | g_autofree char *path = NULL; | |
1452 | g_autofree char *rpath = NULL; | |
1453 | struct daxctl_ctx *ctx; | |
1454 | struct daxctl_region *region; | |
1455 | int rc = 0; | |
1456 | ||
1457 | path = g_strdup_printf("/sys/dev/char/%d:%d", | |
1458 | major(st.st_rdev), minor(st.st_rdev)); | |
1459 | rpath = realpath(path, NULL); | |
1460 | ||
1461 | rc = daxctl_new(&ctx); | |
1462 | if (rc) { | |
1463 | return -1; | |
1464 | } | |
1465 | ||
1466 | daxctl_region_foreach(ctx, region) { | |
1467 | if (strstr(rpath, daxctl_region_get_path(region))) { | |
1468 | align = daxctl_region_get_align(region); | |
1469 | break; | |
1470 | } | |
1471 | } | |
1472 | daxctl_unref(ctx); | |
1473 | } | |
1474 | #endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */ | |
1475 | ||
1476 | return align; | |
1477 | } | |
1478 | ||
8d37b030 MAL |
1479 | static int file_ram_open(const char *path, |
1480 | const char *region_name, | |
369d6dc4 | 1481 | bool readonly, |
8d37b030 MAL |
1482 | bool *created, |
1483 | Error **errp) | |
c902760f MT |
1484 | { |
1485 | char *filename; | |
8ca761f6 PF |
1486 | char *sanitized_name; |
1487 | char *c; | |
5c3ece79 | 1488 | int fd = -1; |
c902760f | 1489 | |
8d37b030 | 1490 | *created = false; |
fd97fd44 | 1491 | for (;;) { |
369d6dc4 | 1492 | fd = open(path, readonly ? O_RDONLY : O_RDWR); |
fd97fd44 MA |
1493 | if (fd >= 0) { |
1494 | /* @path names an existing file, use it */ | |
1495 | break; | |
8d31d6b6 | 1496 | } |
fd97fd44 MA |
1497 | if (errno == ENOENT) { |
1498 | /* @path names a file that doesn't exist, create it */ | |
1499 | fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); | |
1500 | if (fd >= 0) { | |
8d37b030 | 1501 | *created = true; |
fd97fd44 MA |
1502 | break; |
1503 | } | |
1504 | } else if (errno == EISDIR) { | |
1505 | /* @path names a directory, create a file there */ | |
1506 | /* Make name safe to use with mkstemp by replacing '/' with '_'. */ | |
8d37b030 | 1507 | sanitized_name = g_strdup(region_name); |
fd97fd44 MA |
1508 | for (c = sanitized_name; *c != '\0'; c++) { |
1509 | if (*c == '/') { | |
1510 | *c = '_'; | |
1511 | } | |
1512 | } | |
8ca761f6 | 1513 | |
fd97fd44 MA |
1514 | filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path, |
1515 | sanitized_name); | |
1516 | g_free(sanitized_name); | |
8d31d6b6 | 1517 | |
fd97fd44 MA |
1518 | fd = mkstemp(filename); |
1519 | if (fd >= 0) { | |
1520 | unlink(filename); | |
1521 | g_free(filename); | |
1522 | break; | |
1523 | } | |
1524 | g_free(filename); | |
8d31d6b6 | 1525 | } |
fd97fd44 MA |
1526 | if (errno != EEXIST && errno != EINTR) { |
1527 | error_setg_errno(errp, errno, | |
1528 | "can't open backing store %s for guest RAM", | |
1529 | path); | |
8d37b030 | 1530 | return -1; |
fd97fd44 MA |
1531 | } |
1532 | /* | |
1533 | * Try again on EINTR and EEXIST. The latter happens when | |
1534 | * something else creates the file between our two open(). | |
1535 | */ | |
8d31d6b6 | 1536 | } |
c902760f | 1537 | |
8d37b030 MAL |
1538 | return fd; |
1539 | } | |
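/*
 * Illustrative sketch, compiled out: the open-or-create retry idiom of
 * file_ram_open() reduced to its core.  Losing the create race makes the
 * O_CREAT | O_EXCL open fail with EEXIST; EEXIST and EINTR both retry.
 */
#if 0
static int toy_open_or_create(const char *path)
{
    for (;;) {
        int fd = open(path, O_RDWR);
        if (fd >= 0) {
            return fd;                         /* existing file */
        }
        if (errno == ENOENT) {
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                return fd;                     /* we created it */
            }
        }
        if (errno != EEXIST && errno != EINTR) {
            return -1;                         /* genuine failure */
        }
        /* EEXIST: somebody created it between the two open()s; retry. */
    }
}
#endif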
1540 | ||
1541 | static void *file_ram_alloc(RAMBlock *block, | |
1542 | ram_addr_t memory, | |
1543 | int fd, | |
369d6dc4 | 1544 | bool readonly, |
8d37b030 MAL |
1545 | bool truncate, |
1546 | Error **errp) | |
1547 | { | |
1548 | void *area; | |
1549 | ||
863e9621 | 1550 | block->page_size = qemu_fd_getpagesize(fd); |
98376843 HZ |
1551 | if (block->mr->align % block->page_size) { |
1552 | error_setg(errp, "alignment 0x%" PRIx64 | |
1553 | " must be a multiple of page size 0x%zx", | |
1554 | block->mr->align, block->page_size); | |
1555 | return NULL; | |
61362b71 DH |
1556 | } else if (block->mr->align && !is_power_of_2(block->mr->align)) { |
1557 | error_setg(errp, "alignment 0x%" PRIx64 | |
1558 | " must be a power of two", block->mr->align); | |
1559 | return NULL; | |
98376843 HZ |
1560 | } |
1561 | block->mr->align = MAX(block->page_size, block->mr->align); | |
8360668e HZ |
1562 | #if defined(__s390x__) |
1563 | if (kvm_enabled()) { | |
1564 | block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); | |
1565 | } | |
1566 | #endif | |
fd97fd44 | 1567 | |
863e9621 | 1568 | if (memory < block->page_size) { |
fd97fd44 | 1569 | error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " |
863e9621 DDAG |
1570 | "or larger than page size 0x%zx", |
1571 | memory, block->page_size); | |
8d37b030 | 1572 | return NULL; |
1775f111 HZ |
1573 | } |
1574 | ||
863e9621 | 1575 | memory = ROUND_UP(memory, block->page_size); |
c902760f MT |
1576 | |
1577 | /* | |
1578 | * ftruncate is not supported by hugetlbfs in older | |
1579 | * hosts, so don't bother bailing out on errors. | |
1580 | * If anything goes wrong with it under other filesystems, | |
1581 | * mmap will fail. | |
d6af99c9 HZ |
1582 | * |
1583 | * Do not truncate the non-empty backend file to avoid corrupting | |
1584 | * the existing data in the file. Disabling shrinking is not | |
1585 | * enough. For example, the current vNVDIMM implementation stores | |
1586 | * the guest NVDIMM labels at the end of the backend file. If the | |
1587 | * backend file is later extended, QEMU will not be able to find | |
1588 | * those labels. Therefore, extending the non-empty backend file | |
1589 | * is disabled as well. | |
c902760f | 1590 | */ |
8d37b030 | 1591 | if (truncate && ftruncate(fd, memory)) { |
9742bf26 | 1592 | perror("ftruncate"); |
7f56e740 | 1593 | } |
c902760f | 1594 | |
369d6dc4 | 1595 | area = qemu_ram_mmap(fd, memory, block->mr->align, readonly, |
2ac0f162 | 1596 | block->flags & RAM_SHARED, block->flags & RAM_PMEM); |
c902760f | 1597 | if (area == MAP_FAILED) { |
7f56e740 | 1598 | error_setg_errno(errp, errno, |
fd97fd44 | 1599 | "unable to map backing store for guest RAM"); |
8d37b030 | 1600 | return NULL; |
c902760f | 1601 | } |
ef36fa14 | 1602 | |
04b16653 | 1603 | block->fd = fd; |
c902760f MT |
1604 | return area; |
1605 | } | |
1606 | #endif | |
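/*
 * Illustrative restatement, compiled out, of the alignment rules that
 * file_ram_alloc() enforces: the region alignment must be a multiple of
 * the backing page size and a power of two; the effective alignment is
 * then at least the page size.
 */
#if 0
static bool toy_align_ok(uint64_t align, size_t page_size)
{
    if (align % page_size) {
        return false;                  /* not a multiple of page size */
    }
    if (align && !is_power_of_2(align)) {
        return false;                  /* not a power of two */
    }
    return true;
}
#endif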
1607 | ||
154cc9ea DDAG |
1608 | /* Allocate space within the ram_addr_t space that governs the |
1609 | * dirty bitmaps. | |
1610 | * Called with the ramlist lock held. | |
1611 | */ | |
d17b5288 | 1612 | static ram_addr_t find_ram_offset(ram_addr_t size) |
04b16653 AW |
1613 | { |
1614 | RAMBlock *block, *next_block; | |
3e837b2c | 1615 | ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; |
04b16653 | 1616 | |
49cd9ac6 SH |
1617 | assert(size != 0); /* it would hand out same offset multiple times */ |
1618 | ||
0dc3f44a | 1619 | if (QLIST_EMPTY_RCU(&ram_list.blocks)) { |
04b16653 | 1620 | return 0; |
0d53d9fe | 1621 | } |
04b16653 | 1622 | |
99e15582 | 1623 | RAMBLOCK_FOREACH(block) { |
154cc9ea | 1624 | ram_addr_t candidate, next = RAM_ADDR_MAX; |
04b16653 | 1625 | |
801110ab DDAG |
1626 | /* Align blocks to start on a 'long' in the bitmap, | |
1627 | * which makes the bitmap sync'ing take the fast path. | |
1628 | */ | |
154cc9ea | 1629 | candidate = block->offset + block->max_length; |
801110ab | 1630 | candidate = ROUND_UP(candidate, BITS_PER_LONG << TARGET_PAGE_BITS); |
04b16653 | 1631 | |
154cc9ea DDAG |
1632 | /* Search for the closest following block |
1633 | * and find the gap. | |
1634 | */ | |
99e15582 | 1635 | RAMBLOCK_FOREACH(next_block) { |
154cc9ea | 1636 | if (next_block->offset >= candidate) { |
04b16653 AW |
1637 | next = MIN(next, next_block->offset); |
1638 | } | |
1639 | } | |
154cc9ea DDAG |
1640 | |
1641 | /* If it fits, remember our place and the size of the | |
1642 | * gap, but keep going so that we might find a smaller | |
1643 | * gap to fill, thus avoiding fragmentation. | |
1644 | */ | |
1645 | if (next - candidate >= size && next - candidate < mingap) { | |
1646 | offset = candidate; | |
1647 | mingap = next - candidate; | |
04b16653 | 1648 | } |
154cc9ea DDAG |
1649 | |
1650 | trace_find_ram_offset_loop(size, candidate, offset, next, mingap); | |
04b16653 | 1651 | } |
3e837b2c AW |
1652 | |
1653 | if (offset == RAM_ADDR_MAX) { | |
1654 | fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", | |
1655 | (uint64_t)size); | |
1656 | abort(); | |
1657 | } | |
1658 | ||
154cc9ea DDAG |
1659 | trace_find_ram_offset(size, offset); |
1660 | ||
04b16653 AW |
1661 | return offset; |
1662 | } | |
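/*
 * Illustrative sketch, compiled out: the best-fit gap search of
 * find_ram_offset() over a toy array of [base, end) extents.  The type
 * and names are invented; bitmap alignment and tracing are omitted.
 */
#if 0
typedef struct { uint64_t base, end; } ToyExtent;

static uint64_t toy_find_offset(const ToyExtent *blocks, int n, uint64_t size)
{
    uint64_t offset = UINT64_MAX, mingap = UINT64_MAX;

    for (int i = 0; i < n; i++) {
        uint64_t candidate = blocks[i].end;    /* try right after block i */
        uint64_t next = UINT64_MAX;

        /* the closest following block bounds the gap */
        for (int j = 0; j < n; j++) {
            if (blocks[j].base >= candidate) {
                next = MIN(next, blocks[j].base);
            }
        }
        /* prefer the smallest gap that still fits, to limit fragmentation */
        if (next - candidate >= size && next - candidate < mingap) {
            offset = candidate;
            mingap = next - candidate;
        }
    }
    return offset;                             /* UINT64_MAX: no gap found */
}
#endif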
1663 | ||
c136180c | 1664 | static unsigned long last_ram_page(void) |
d17b5288 AW |
1665 | { |
1666 | RAMBlock *block; | |
1667 | ram_addr_t last = 0; | |
1668 | ||
694ea274 | 1669 | RCU_READ_LOCK_GUARD(); |
99e15582 | 1670 | RAMBLOCK_FOREACH(block) { |
62be4e3a | 1671 | last = MAX(last, block->offset + block->max_length); |
0d53d9fe | 1672 | } |
b8c48993 | 1673 | return last >> TARGET_PAGE_BITS; |
d17b5288 AW |
1674 | } |
1675 | ||
ddb97f1d JB |
1676 | static void qemu_ram_setup_dump(void *addr, ram_addr_t size) |
1677 | { | |
1678 | int ret; | |
ddb97f1d JB |
1679 | |
1680 | /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */ | |
47c8ca53 | 1681 | if (!machine_dump_guest_core(current_machine)) { |
ddb97f1d JB |
1682 | ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); |
1683 | if (ret) { | |
1684 | perror("qemu_madvise"); | |
1685 | fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " | |
1686 | "but dump_guest_core=off specified\n"); | |
1687 | } | |
1688 | } | |
1689 | } | |
1690 | ||
422148d3 DDAG |
1691 | const char *qemu_ram_get_idstr(RAMBlock *rb) |
1692 | { | |
1693 | return rb->idstr; | |
1694 | } | |
1695 | ||
754cb9c0 YK |
1696 | void *qemu_ram_get_host_addr(RAMBlock *rb) |
1697 | { | |
1698 | return rb->host; | |
1699 | } | |
1700 | ||
1701 | ram_addr_t qemu_ram_get_offset(RAMBlock *rb) | |
1702 | { | |
1703 | return rb->offset; | |
1704 | } | |
1705 | ||
1706 | ram_addr_t qemu_ram_get_used_length(RAMBlock *rb) | |
1707 | { | |
1708 | return rb->used_length; | |
1709 | } | |
1710 | ||
463a4ac2 DDAG |
1711 | bool qemu_ram_is_shared(RAMBlock *rb) |
1712 | { | |
1713 | return rb->flags & RAM_SHARED; | |
1714 | } | |
1715 | ||
2ce16640 DDAG |
1716 | /* Note: Only set at the start of postcopy */ |
1717 | bool qemu_ram_is_uf_zeroable(RAMBlock *rb) | |
1718 | { | |
1719 | return rb->flags & RAM_UF_ZEROPAGE; | |
1720 | } | |
1721 | ||
1722 | void qemu_ram_set_uf_zeroable(RAMBlock *rb) | |
1723 | { | |
1724 | rb->flags |= RAM_UF_ZEROPAGE; | |
1725 | } | |
1726 | ||
b895de50 CLG |
1727 | bool qemu_ram_is_migratable(RAMBlock *rb) |
1728 | { | |
1729 | return rb->flags & RAM_MIGRATABLE; | |
1730 | } | |
1731 | ||
1732 | void qemu_ram_set_migratable(RAMBlock *rb) | |
1733 | { | |
1734 | rb->flags |= RAM_MIGRATABLE; | |
1735 | } | |
1736 | ||
1737 | void qemu_ram_unset_migratable(RAMBlock *rb) | |
1738 | { | |
1739 | rb->flags &= ~RAM_MIGRATABLE; | |
1740 | } | |
1741 | ||
ae3a7047 | 1742 | /* Called with iothread lock held. */ |
fa53a0e5 | 1743 | void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) |
20cfe881 | 1744 | { |
fa53a0e5 | 1745 | RAMBlock *block; |
20cfe881 | 1746 | |
c5705a77 AK |
1747 | assert(new_block); |
1748 | assert(!new_block->idstr[0]); | |
84b89d78 | 1749 | |
09e5ab63 AL |
1750 | if (dev) { |
1751 | char *id = qdev_get_dev_path(dev); | |
84b89d78 CM |
1752 | if (id) { |
1753 | snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); | |
7267c094 | 1754 | g_free(id); |
84b89d78 CM |
1755 | } |
1756 | } | |
1757 | pstrcat(new_block->idstr, sizeof(new_block->idstr), name); | |
1758 | ||
694ea274 | 1759 | RCU_READ_LOCK_GUARD(); |
99e15582 | 1760 | RAMBLOCK_FOREACH(block) { |
fa53a0e5 GA |
1761 | if (block != new_block && |
1762 | !strcmp(block->idstr, new_block->idstr)) { | |
84b89d78 CM |
1763 | fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", |
1764 | new_block->idstr); | |
1765 | abort(); | |
1766 | } | |
1767 | } | |
c5705a77 AK |
1768 | } |
1769 | ||
ae3a7047 | 1770 | /* Called with iothread lock held. */ |
fa53a0e5 | 1771 | void qemu_ram_unset_idstr(RAMBlock *block) |
20cfe881 | 1772 | { |
ae3a7047 MD |
1773 | /* FIXME: arch_init.c assumes that this is not called throughout |
1774 | * migration. Ignore the problem since hot-unplug during migration | |
1775 | * does not work anyway. | |
1776 | */ | |
20cfe881 HT |
1777 | if (block) { |
1778 | memset(block->idstr, 0, sizeof(block->idstr)); | |
1779 | } | |
1780 | } | |
1781 | ||
863e9621 DDAG |
1782 | size_t qemu_ram_pagesize(RAMBlock *rb) |
1783 | { | |
1784 | return rb->page_size; | |
1785 | } | |
1786 | ||
67f11b5c DDAG |
1787 | /* Returns the largest size of page in use */ |
1788 | size_t qemu_ram_pagesize_largest(void) | |
1789 | { | |
1790 | RAMBlock *block; | |
1791 | size_t largest = 0; | |
1792 | ||
99e15582 | 1793 | RAMBLOCK_FOREACH(block) { |
67f11b5c DDAG |
1794 | largest = MAX(largest, qemu_ram_pagesize(block)); |
1795 | } | |
1796 | ||
1797 | return largest; | |
1798 | } | |
1799 | ||
8490fc78 LC |
1800 | static int memory_try_enable_merging(void *addr, size_t len) |
1801 | { | |
75cc7f01 | 1802 | if (!machine_mem_merge(current_machine)) { |
8490fc78 LC |
1803 | /* disabled by the user */ |
1804 | return 0; | |
1805 | } | |
1806 | ||
1807 | return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); | |
1808 | } | |
1809 | ||
62be4e3a MT |
1810 | /* Only legal before the guest might have detected the memory size: e.g. on | |
1811 | * incoming migration, or right after reset. | |
1812 | * | |
1813 | * As the memory core doesn't know how memory is accessed, it is up to the | |
1814 | * resize callback to update device state and/or add assertions to detect | |
1815 | * misuse, if necessary. | |
1816 | */ | |
fa53a0e5 | 1817 | int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) |
62be4e3a | 1818 | { |
ce4adc0b DH |
1819 | const ram_addr_t unaligned_size = newsize; |
1820 | ||
62be4e3a MT |
1821 | assert(block); |
1822 | ||
4ed023ce | 1823 | newsize = HOST_PAGE_ALIGN(newsize); |
129ddaf3 | 1824 | |
62be4e3a | 1825 | if (block->used_length == newsize) { |
ce4adc0b DH |
1826 | /* |
1827 | * We don't have to resize the ram block (which only knows aligned | |
1828 | * sizes), however, we have to notify if the unaligned size changed. | |
1829 | */ | |
1830 | if (unaligned_size != memory_region_size(block->mr)) { | |
1831 | memory_region_set_size(block->mr, unaligned_size); | |
1832 | if (block->resized) { | |
1833 | block->resized(block->idstr, unaligned_size, block->host); | |
1834 | } | |
1835 | } | |
62be4e3a MT |
1836 | return 0; |
1837 | } | |
1838 | ||
1839 | if (!(block->flags & RAM_RESIZEABLE)) { | |
1840 | error_setg_errno(errp, EINVAL, | |
a3a92908 PG |
1841 | "Size mismatch: %s: 0x" RAM_ADDR_FMT |
1842 | " != 0x" RAM_ADDR_FMT, block->idstr, | |
62be4e3a MT |
1843 | newsize, block->used_length); |
1844 | return -EINVAL; | |
1845 | } | |
1846 | ||
1847 | if (block->max_length < newsize) { | |
1848 | error_setg_errno(errp, EINVAL, | |
a3a92908 | 1849 | "Size too large: %s: 0x" RAM_ADDR_FMT |
62be4e3a MT |
1850 | " > 0x" RAM_ADDR_FMT, block->idstr, |
1851 | newsize, block->max_length); | |
1852 | return -EINVAL; | |
1853 | } | |
1854 | ||
1855 | cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); | |
1856 | block->used_length = newsize; | |
58d2707e PB |
1857 | cpu_physical_memory_set_dirty_range(block->offset, block->used_length, |
1858 | DIRTY_CLIENTS_ALL); | |
ce4adc0b | 1859 | memory_region_set_size(block->mr, unaligned_size); |
62be4e3a | 1860 | if (block->resized) { |
ce4adc0b | 1861 | block->resized(block->idstr, unaligned_size, block->host); |
62be4e3a MT |
1862 | } |
1863 | return 0; | |
1864 | } | |
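/*
 * Hypothetical caller, compiled out: growing a RAM_RESIZEABLE block on
 * incoming migration, before the guest can have observed the old size.
 */
#if 0
static void toy_grow_block(RAMBlock *rb, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(rb, newsize, &err) < 0) {
        error_report_err(err);
    }
}
#endif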
1865 | ||
61c490e2 BM |
1866 | /* |
1867 | * Trigger sync on the given ram block for range [start, start + length] | |
1868 | * with the backing store if one is available. | |
1869 | * Otherwise no-op. | |
1870 | * @Note: this is supposed to be a synchronous op. | |
1871 | */ | |
ab7e41e6 | 1872 | void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) |
61c490e2 | 1873 | { |
61c490e2 BM |
1874 | /* The requested range should fit within the block range */ | |
1875 | g_assert((start + length) <= block->used_length); | |
1876 | ||
1877 | #ifdef CONFIG_LIBPMEM | |
1878 | /* The lack of support for pmem should not block the sync */ | |
1879 | if (ramblock_is_pmem(block)) { | |
5d4c9549 | 1880 | void *addr = ramblock_ptr(block, start); |
61c490e2 BM |
1881 | pmem_persist(addr, length); |
1882 | return; | |
1883 | } | |
1884 | #endif | |
1885 | if (block->fd >= 0) { | |
1886 | /** | |
1887 | * If there is no support for PMEM, or the memory has not been | |
1888 | * specified as persistent (or is not persistent memory), use msync. | |
1889 | * Less optimal, but it still achieves the same goal. | |
1890 | */ | |
5d4c9549 | 1891 | void *addr = ramblock_ptr(block, start); |
61c490e2 BM |
1892 | if (qemu_msync(addr, length, block->fd)) { |
1893 | warn_report("%s: failed to sync memory range: start: " | |
1894 | RAM_ADDR_FMT " length: " RAM_ADDR_FMT, | |
1895 | __func__, start, length); | |
1896 | } | |
1897 | } | |
1898 | } | |
1899 | ||
5b82b703 SH |
1900 | /* Called with ram_list.mutex held */ |
1901 | static void dirty_memory_extend(ram_addr_t old_ram_size, | |
1902 | ram_addr_t new_ram_size) | |
1903 | { | |
1904 | ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, | |
1905 | DIRTY_MEMORY_BLOCK_SIZE); | |
1906 | ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, | |
1907 | DIRTY_MEMORY_BLOCK_SIZE); | |
1908 | int i; | |
1909 | ||
1910 | /* Only need to extend if block count increased */ | |
1911 | if (new_num_blocks <= old_num_blocks) { | |
1912 | return; | |
1913 | } | |
1914 | ||
1915 | for (i = 0; i < DIRTY_MEMORY_NUM; i++) { | |
1916 | DirtyMemoryBlocks *old_blocks; | |
1917 | DirtyMemoryBlocks *new_blocks; | |
1918 | int j; | |
1919 | ||
d73415a3 | 1920 | old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]); |
5b82b703 SH |
1921 | new_blocks = g_malloc(sizeof(*new_blocks) + |
1922 | sizeof(new_blocks->blocks[0]) * new_num_blocks); | |
1923 | ||
1924 | if (old_num_blocks) { | |
1925 | memcpy(new_blocks->blocks, old_blocks->blocks, | |
1926 | old_num_blocks * sizeof(old_blocks->blocks[0])); | |
1927 | } | |
1928 | ||
1929 | for (j = old_num_blocks; j < new_num_blocks; j++) { | |
1930 | new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); | |
1931 | } | |
1932 | ||
d73415a3 | 1933 | qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); |
5b82b703 SH |
1934 | |
1935 | if (old_blocks) { | |
1936 | g_free_rcu(old_blocks, rcu); | |
1937 | } | |
1938 | } | |
1939 | } | |
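/*
 * Illustrative sketch, compiled out: the copy-then-publish RCU pattern
 * used by dirty_memory_extend(), reduced to a toy integer array.  A
 * reader inside an RCU critical section sees either the old or the new
 * array, never a partially updated one.
 */
#if 0
static void toy_rcu_extend(int **slot, int old_n, int new_n)
{
    int *old = qatomic_rcu_read(slot);
    int *new = g_new0(int, new_n);

    if (old_n) {
        memcpy(new, old, old_n * sizeof(*new));  /* copy existing entries */
    }
    qatomic_rcu_set(slot, new);                  /* publish atomically */
    /* "old" may only be freed after a grace period, cf. g_free_rcu(). */
}
#endif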
1940 | ||
06329cce | 1941 | static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared) |
c5705a77 | 1942 | { |
e1c57ab8 | 1943 | RAMBlock *block; |
0d53d9fe | 1944 | RAMBlock *last_block = NULL; |
2152f5ca | 1945 | ram_addr_t old_ram_size, new_ram_size; |
37aa7a0e | 1946 | Error *err = NULL; |
2152f5ca | 1947 | |
b8c48993 | 1948 | old_ram_size = last_ram_page(); |
c5705a77 | 1949 | |
b2a8658e | 1950 | qemu_mutex_lock_ramlist(); |
9b8424d5 | 1951 | new_block->offset = find_ram_offset(new_block->max_length); |
e1c57ab8 PB |
1952 | |
1953 | if (!new_block->host) { | |
1954 | if (xen_enabled()) { | |
9b8424d5 | 1955 | xen_ram_alloc(new_block->offset, new_block->max_length, |
37aa7a0e MA |
1956 | new_block->mr, &err); |
1957 | if (err) { | |
1958 | error_propagate(errp, err); | |
1959 | qemu_mutex_unlock_ramlist(); | |
39c350ee | 1960 | return; |
37aa7a0e | 1961 | } |
e1c57ab8 | 1962 | } else { |
9b8424d5 | 1963 | new_block->host = phys_mem_alloc(new_block->max_length, |
06329cce | 1964 | &new_block->mr->align, shared); |
39228250 | 1965 | if (!new_block->host) { |
ef701d7b HT |
1966 | error_setg_errno(errp, errno, |
1967 | "cannot set up guest memory '%s'", | |
1968 | memory_region_name(new_block->mr)); | |
1969 | qemu_mutex_unlock_ramlist(); | |
39c350ee | 1970 | return; |
39228250 | 1971 | } |
9b8424d5 | 1972 | memory_try_enable_merging(new_block->host, new_block->max_length); |
6977dfe6 | 1973 | } |
c902760f | 1974 | } |
94a6b54f | 1975 | |
dd631697 LZ |
1976 | new_ram_size = MAX(old_ram_size, |
1977 | (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); | |
1978 | if (new_ram_size > old_ram_size) { | |
5b82b703 | 1979 | dirty_memory_extend(old_ram_size, new_ram_size); |
dd631697 | 1980 | } |
0d53d9fe MD |
1981 | /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, |
1982 | * QLIST (which has an RCU-friendly variant) does not have insertion at | |
1983 | * tail, so save the last element in last_block. | |
1984 | */ | |
99e15582 | 1985 | RAMBLOCK_FOREACH(block) { |
0d53d9fe | 1986 | last_block = block; |
9b8424d5 | 1987 | if (block->max_length < new_block->max_length) { |
abb26d63 PB |
1988 | break; |
1989 | } | |
1990 | } | |
1991 | if (block) { | |
0dc3f44a | 1992 | QLIST_INSERT_BEFORE_RCU(block, new_block, next); |
0d53d9fe | 1993 | } else if (last_block) { |
0dc3f44a | 1994 | QLIST_INSERT_AFTER_RCU(last_block, new_block, next); |
0d53d9fe | 1995 | } else { /* list is empty */ |
0dc3f44a | 1996 | QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); |
abb26d63 | 1997 | } |
0d6d3c87 | 1998 | ram_list.mru_block = NULL; |
94a6b54f | 1999 | |
0dc3f44a MD |
2000 | /* Write list before version */ |
2001 | smp_wmb(); | |
f798b07f | 2002 | ram_list.version++; |
b2a8658e | 2003 | qemu_mutex_unlock_ramlist(); |
f798b07f | 2004 | |
9b8424d5 | 2005 | cpu_physical_memory_set_dirty_range(new_block->offset, |
58d2707e PB |
2006 | new_block->used_length, |
2007 | DIRTY_CLIENTS_ALL); | |
94a6b54f | 2008 | |
a904c911 PB |
2009 | if (new_block->host) { |
2010 | qemu_ram_setup_dump(new_block->host, new_block->max_length); | |
2011 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); | |
a028edea AB |
2012 | /* |
2013 | * MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU. | |
2014 | * Configure it unless the machine is a qtest server, in which case | |
2015 | * KVM is not used and it may be forked (eg for fuzzing purposes). | |
2016 | */ | |
2017 | if (!qtest_enabled()) { | |
2018 | qemu_madvise(new_block->host, new_block->max_length, | |
2019 | QEMU_MADV_DONTFORK); | |
2020 | } | |
0987d735 | 2021 | ram_block_notify_add(new_block->host, new_block->max_length); |
e1c57ab8 | 2022 | } |
94a6b54f | 2023 | } |
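/*
 * Illustrative sketch, compiled out: the "write list before version"
 * publication order used above, in miniature.  A reader that observes
 * the new version is then guaranteed to also observe the list update.
 */
#if 0
static void toy_publish(int *list_cell, unsigned *version, int value)
{
    *list_cell = value;    /* write list ... */
    smp_wmb();             /* ... before version */
    (*version)++;
}
#endif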
e9a1ab19 | 2024 | |
d5dbde46 | 2025 | #ifdef CONFIG_POSIX |
38b3362d | 2026 | RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, |
369d6dc4 | 2027 | uint32_t ram_flags, int fd, bool readonly, |
38b3362d | 2028 | Error **errp) |
e1c57ab8 PB |
2029 | { |
2030 | RAMBlock *new_block; | |
ef701d7b | 2031 | Error *local_err = NULL; |
ce317be9 | 2032 | int64_t file_size, file_align; |
e1c57ab8 | 2033 | |
a4de8552 JH |
2034 | /* Only these ram flags are supported for now. */ | |
2035 | assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0); | |
2036 | ||
e1c57ab8 | 2037 | if (xen_enabled()) { |
7f56e740 | 2038 | error_setg(errp, "-mem-path not supported with Xen"); |
528f46af | 2039 | return NULL; |
e1c57ab8 PB |
2040 | } |
2041 | ||
e45e7ae2 MAL |
2042 | if (kvm_enabled() && !kvm_has_sync_mmu()) { |
2043 | error_setg(errp, | |
2044 | "host lacks kvm mmu notifiers, -mem-path unsupported"); | |
2045 | return NULL; | |
2046 | } | |
2047 | ||
e1c57ab8 PB |
2048 | if (phys_mem_alloc != qemu_anon_ram_alloc) { |
2049 | /* | |
2050 | * file_ram_alloc() needs to allocate just like | |
2051 | * phys_mem_alloc, but we haven't bothered to provide | |
2052 | * a hook there. | |
2053 | */ | |
7f56e740 PB |
2054 | error_setg(errp, |
2055 | "-mem-path not supported with this accelerator"); | |
528f46af | 2056 | return NULL; |
e1c57ab8 PB |
2057 | } |
2058 | ||
4ed023ce | 2059 | size = HOST_PAGE_ALIGN(size); |
8d37b030 MAL |
2060 | file_size = get_file_size(fd); |
2061 | if (file_size > 0 && file_size < size) { | |
c001c3b3 | 2062 | error_setg(errp, "backing store size 0x%" PRIx64 |
8d37b030 | 2063 | " does not match 'size' option 0x" RAM_ADDR_FMT, |
c001c3b3 | 2064 | file_size, size); |
8d37b030 MAL |
2065 | return NULL; |
2066 | } | |
2067 | ||
ce317be9 JL |
2068 | file_align = get_file_align(fd); |
2069 | if (file_align > 0 && mr && file_align > mr->align) { | |
2070 | error_setg(errp, "backing store align 0x%" PRIx64 | |
5f509751 | 2071 | " is larger than 'align' option 0x%" PRIx64, |
ce317be9 JL |
2072 | file_align, mr->align); |
2073 | return NULL; | |
2074 | } | |
2075 | ||
e1c57ab8 PB |
2076 | new_block = g_malloc0(sizeof(*new_block)); |
2077 | new_block->mr = mr; | |
9b8424d5 MT |
2078 | new_block->used_length = size; |
2079 | new_block->max_length = size; | |
cbfc0171 | 2080 | new_block->flags = ram_flags; |
369d6dc4 SH |
2081 | new_block->host = file_ram_alloc(new_block, size, fd, readonly, |
2082 | !file_size, errp); | |
7f56e740 PB |
2083 | if (!new_block->host) { |
2084 | g_free(new_block); | |
528f46af | 2085 | return NULL; |
7f56e740 PB |
2086 | } |
2087 | ||
cbfc0171 | 2088 | ram_block_add(new_block, &local_err, ram_flags & RAM_SHARED); |
ef701d7b HT |
2089 | if (local_err) { |
2090 | g_free(new_block); | |
2091 | error_propagate(errp, local_err); | |
528f46af | 2092 | return NULL; |
ef701d7b | 2093 | } |
528f46af | 2094 | return new_block; |
38b3362d MAL |
2095 | |
2096 | } | |
2097 | ||
2098 | ||
2099 | RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, | |
cbfc0171 | 2100 | uint32_t ram_flags, const char *mem_path, |
369d6dc4 | 2101 | bool readonly, Error **errp) |
38b3362d MAL |
2102 | { |
2103 | int fd; | |
2104 | bool created; | |
2105 | RAMBlock *block; | |
2106 | ||
369d6dc4 SH |
2107 | fd = file_ram_open(mem_path, memory_region_name(mr), readonly, &created, |
2108 | errp); | |
38b3362d MAL |
2109 | if (fd < 0) { |
2110 | return NULL; | |
2111 | } | |
2112 | ||
369d6dc4 | 2113 | block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, readonly, errp); |
38b3362d MAL |
2114 | if (!block) { |
2115 | if (created) { | |
2116 | unlink(mem_path); | |
2117 | } | |
2118 | close(fd); | |
2119 | return NULL; | |
2120 | } | |
2121 | ||
2122 | return block; | |
e1c57ab8 | 2123 | } |
0b183fc8 | 2124 | #endif |
e1c57ab8 | 2125 | |
62be4e3a | 2126 | static |
528f46af FZ |
2127 | RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, |
2128 | void (*resized)(const char*, | |
2129 | uint64_t length, | |
2130 | void *host), | |
06329cce | 2131 | void *host, bool resizeable, bool share, |
528f46af | 2132 | MemoryRegion *mr, Error **errp) |
e1c57ab8 PB |
2133 | { |
2134 | RAMBlock *new_block; | |
ef701d7b | 2135 | Error *local_err = NULL; |
e1c57ab8 | 2136 | |
4ed023ce DDAG |
2137 | size = HOST_PAGE_ALIGN(size); |
2138 | max_size = HOST_PAGE_ALIGN(max_size); | |
e1c57ab8 PB |
2139 | new_block = g_malloc0(sizeof(*new_block)); |
2140 | new_block->mr = mr; | |
62be4e3a | 2141 | new_block->resized = resized; |
9b8424d5 MT |
2142 | new_block->used_length = size; |
2143 | new_block->max_length = max_size; | |
62be4e3a | 2144 | assert(max_size >= size); |
e1c57ab8 | 2145 | new_block->fd = -1; |
038adc2f | 2146 | new_block->page_size = qemu_real_host_page_size; |
e1c57ab8 PB |
2147 | new_block->host = host; |
2148 | if (host) { | |
7bd4f430 | 2149 | new_block->flags |= RAM_PREALLOC; |
e1c57ab8 | 2150 | } |
62be4e3a MT |
2151 | if (resizeable) { |
2152 | new_block->flags |= RAM_RESIZEABLE; | |
2153 | } | |
06329cce | 2154 | ram_block_add(new_block, &local_err, share); |
ef701d7b HT |
2155 | if (local_err) { |
2156 | g_free(new_block); | |
2157 | error_propagate(errp, local_err); | |
528f46af | 2158 | return NULL; |
ef701d7b | 2159 | } |
528f46af | 2160 | return new_block; |
e1c57ab8 PB |
2161 | } |
2162 | ||
528f46af | 2163 | RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, |
62be4e3a MT |
2164 | MemoryRegion *mr, Error **errp) |
2165 | { | |
06329cce MA |
2166 | return qemu_ram_alloc_internal(size, size, NULL, host, false, |
2167 | false, mr, errp); | |
62be4e3a MT |
2168 | } |
2169 | ||
06329cce MA |
2170 | RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, |
2171 | MemoryRegion *mr, Error **errp) | |
6977dfe6 | 2172 | { |
06329cce MA |
2173 | return qemu_ram_alloc_internal(size, size, NULL, NULL, false, |
2174 | share, mr, errp); | |
62be4e3a MT |
2175 | } |
2176 | ||
528f46af | 2177 | RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, |
62be4e3a MT |
2178 | void (*resized)(const char*, |
2179 | uint64_t length, | |
2180 | void *host), | |
2181 | MemoryRegion *mr, Error **errp) | |
2182 | { | |
06329cce MA |
2183 | return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, |
2184 | false, mr, errp); | |
6977dfe6 YT |
2185 | } |
2186 | ||
43771539 PB |
2187 | static void reclaim_ramblock(RAMBlock *block) |
2188 | { | |
2189 | if (block->flags & RAM_PREALLOC) { | |
2190 | ; | |
2191 | } else if (xen_enabled()) { | |
2192 | xen_invalidate_map_cache_entry(block->host); | |
2193 | #ifndef _WIN32 | |
2194 | } else if (block->fd >= 0) { | |
53adb9d4 | 2195 | qemu_ram_munmap(block->fd, block->host, block->max_length); |
43771539 PB |
2196 | close(block->fd); |
2197 | #endif | |
2198 | } else { | |
2199 | qemu_anon_ram_free(block->host, block->max_length); | |
2200 | } | |
2201 | g_free(block); | |
2202 | } | |
2203 | ||
f1060c55 | 2204 | void qemu_ram_free(RAMBlock *block) |
e9a1ab19 | 2205 | { |
85bc2a15 MAL |
2206 | if (!block) { |
2207 | return; | |
2208 | } | |
2209 | ||
0987d735 PB |
2210 | if (block->host) { |
2211 | ram_block_notify_remove(block->host, block->max_length); | |
2212 | } | |
2213 | ||
b2a8658e | 2214 | qemu_mutex_lock_ramlist(); |
f1060c55 FZ |
2215 | QLIST_REMOVE_RCU(block, next); |
2216 | ram_list.mru_block = NULL; | |
2217 | /* Write list before version */ | |
2218 | smp_wmb(); | |
2219 | ram_list.version++; | |
2220 | call_rcu(block, reclaim_ramblock, rcu); | |
b2a8658e | 2221 | qemu_mutex_unlock_ramlist(); |
e9a1ab19 FB |
2222 | } |
2223 | ||
cd19cfa2 HY |
2224 | #ifndef _WIN32 |
2225 | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) | |
2226 | { | |
2227 | RAMBlock *block; | |
2228 | ram_addr_t offset; | |
2229 | int flags; | |
2230 | void *area, *vaddr; | |
2231 | ||
99e15582 | 2232 | RAMBLOCK_FOREACH(block) { |
cd19cfa2 | 2233 | offset = addr - block->offset; |
9b8424d5 | 2234 | if (offset < block->max_length) { |
1240be24 | 2235 | vaddr = ramblock_ptr(block, offset); |
7bd4f430 | 2236 | if (block->flags & RAM_PREALLOC) { |
cd19cfa2 | 2237 | ; |
dfeaf2ab MA |
2238 | } else if (xen_enabled()) { |
2239 | abort(); | |
cd19cfa2 HY |
2240 | } else { |
2241 | flags = MAP_FIXED; | |
3435f395 | 2242 | if (block->fd >= 0) { |
dbcb8981 PB |
2243 | flags |= (block->flags & RAM_SHARED ? |
2244 | MAP_SHARED : MAP_PRIVATE); | |
3435f395 MA |
2245 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, |
2246 | flags, block->fd, offset); | |
cd19cfa2 | 2247 | } else { |
2eb9fbaa MA |
2248 | /* |
2249 | * Remap needs to match alloc. Accelerators that | |
2250 | * set phys_mem_alloc never remap. If they did, | |
2251 | * we'd need a remap hook here. | |
2252 | */ | |
2253 | assert(phys_mem_alloc == qemu_anon_ram_alloc); | |
2254 | ||
cd19cfa2 HY |
2255 | flags |= MAP_PRIVATE | MAP_ANONYMOUS; |
2256 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, | |
2257 | flags, -1, 0); | |
cd19cfa2 HY |
2258 | } |
2259 | if (area != vaddr) { | |
493d89bf AF |
2260 | error_report("Could not remap addr: " |
2261 | RAM_ADDR_FMT "@" RAM_ADDR_FMT "", | |
2262 | length, addr); | |
cd19cfa2 HY |
2263 | exit(1); |
2264 | } | |
8490fc78 | 2265 | memory_try_enable_merging(vaddr, length); |
ddb97f1d | 2266 | qemu_ram_setup_dump(vaddr, length); |
cd19cfa2 | 2267 | } |
cd19cfa2 HY |
2268 | } |
2269 | } | |
2270 | } | |
2271 | #endif /* !_WIN32 */ | |
2272 | ||
1b5ec234 | 2273 | /* Return a host pointer to ram allocated with qemu_ram_alloc. |
ae3a7047 MD |
2274 | * This should not be used for general purpose DMA. Use address_space_map |
2275 | * or address_space_rw instead. For local memory (e.g. video ram) that the | |
2276 | * device owns, use memory_region_get_ram_ptr. | |
0dc3f44a | 2277 | * |
49b24afc | 2278 | * Called within RCU critical section. |
1b5ec234 | 2279 | */ |
0878d0e1 | 2280 | void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) |
1b5ec234 | 2281 | { |
3655cb9c GA |
2282 | RAMBlock *block = ram_block; |
2283 | ||
2284 | if (block == NULL) { | |
2285 | block = qemu_get_ram_block(addr); | |
0878d0e1 | 2286 | addr -= block->offset; |
3655cb9c | 2287 | } |
ae3a7047 MD |
2288 | |
2289 | if (xen_enabled() && block->host == NULL) { | |
0d6d3c87 PB |
2290 | /* We need to check if the requested address is in RAM | |
2291 | * because we don't want to map the entire memory in QEMU. | |
2292 | * In that case just map until the end of the page. | |
2293 | */ | |
2294 | if (block->offset == 0) { | |
1ff7c598 | 2295 | return xen_map_cache(addr, 0, 0, false); |
0d6d3c87 | 2296 | } |
ae3a7047 | 2297 | |
1ff7c598 | 2298 | block->host = xen_map_cache(block->offset, block->max_length, 1, false); |
0d6d3c87 | 2299 | } |
0878d0e1 | 2300 | return ramblock_ptr(block, addr); |
dc828ca1 PB |
2301 | } |
2302 | ||
0878d0e1 | 2303 | /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr |
ae3a7047 | 2304 | * but takes a size argument. |
0dc3f44a | 2305 | * |
e81bcda5 | 2306 | * Called within RCU critical section. |
ae3a7047 | 2307 | */ |
3655cb9c | 2308 | static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr, |
f5aa69bd | 2309 | hwaddr *size, bool lock) |
38bee5dc | 2310 | { |
3655cb9c | 2311 | RAMBlock *block = ram_block; |
8ab934f9 SS |
2312 | if (*size == 0) { |
2313 | return NULL; | |
2314 | } | |
e81bcda5 | 2315 | |
3655cb9c GA |
2316 | if (block == NULL) { |
2317 | block = qemu_get_ram_block(addr); | |
0878d0e1 | 2318 | addr -= block->offset; |
3655cb9c | 2319 | } |
0878d0e1 | 2320 | *size = MIN(*size, block->max_length - addr); |
e81bcda5 PB |
2321 | |
2322 | if (xen_enabled() && block->host == NULL) { | |
2323 | /* We need to check if the requested address is in RAM | |
2324 | * because we don't want to map the entire memory in QEMU. | |
2325 | * In that case just map the requested area. | |
2326 | */ | |
2327 | if (block->offset == 0) { | |
f5aa69bd | 2328 | return xen_map_cache(addr, *size, lock, lock); |
38bee5dc SS |
2329 | } |
2330 | ||
f5aa69bd | 2331 | block->host = xen_map_cache(block->offset, block->max_length, 1, lock); |
38bee5dc | 2332 | } |
e81bcda5 | 2333 | |
0878d0e1 | 2334 | return ramblock_ptr(block, addr); |
38bee5dc SS |
2335 | } |
2336 | ||
f90bb71b DDAG |
2337 | /* Return the offset of a host pointer within a RAMBlock */ | |
2338 | ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host) | |
2339 | { | |
2340 | ram_addr_t res = (uint8_t *)host - (uint8_t *)rb->host; | |
2341 | assert((uintptr_t)host >= (uintptr_t)rb->host); | |
2342 | assert(res < rb->max_length); | |
2343 | ||
2344 | return res; | |
2345 | } | |
2346 | ||
422148d3 DDAG |
2347 | /* |
2348 | * Translates a host ptr back to a RAMBlock, a ram_addr and an offset | |
2349 | * in that RAMBlock. | |
2350 | * | |
2351 | * ptr: Host pointer to look up | |
2352 | * round_offset: If true round the result offset down to a page boundary | |
2353 | * *ram_addr: set to result ram_addr | |
2354 | * *offset: set to result offset within the RAMBlock | |
2355 | * | |
2356 | * Returns: RAMBlock (or NULL if not found) | |
ae3a7047 MD |
2357 | * |
2358 | * By the time this function returns, the returned pointer is not protected | |
2359 | * by RCU anymore. If the caller is not within an RCU critical section and | |
2360 | * does not hold the iothread lock, it must have other means of protecting the | |
2361 | * pointer, such as a reference to the region that includes the incoming | |
2362 | * ram_addr_t. | |
2363 | */ | |
422148d3 | 2364 | RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, |
422148d3 | 2365 | ram_addr_t *offset) |
5579c7f3 | 2366 | { |
94a6b54f PB |
2367 | RAMBlock *block; |
2368 | uint8_t *host = ptr; | |
2369 | ||
868bb33f | 2370 | if (xen_enabled()) { |
f615f396 | 2371 | ram_addr_t ram_addr; |
694ea274 | 2372 | RCU_READ_LOCK_GUARD(); |
f615f396 PB |
2373 | ram_addr = xen_ram_addr_from_mapcache(ptr); |
2374 | block = qemu_get_ram_block(ram_addr); | |
422148d3 | 2375 | if (block) { |
d6b6aec4 | 2376 | *offset = ram_addr - block->offset; |
422148d3 | 2377 | } |
422148d3 | 2378 | return block; |
712c2b41 SS |
2379 | } |
2380 | ||
694ea274 | 2381 | RCU_READ_LOCK_GUARD(); |
d73415a3 | 2382 | block = qatomic_rcu_read(&ram_list.mru_block); |
9b8424d5 | 2383 | if (block && block->host && host - block->host < block->max_length) { |
23887b79 PB |
2384 | goto found; |
2385 | } | |
2386 | ||
99e15582 | 2387 | RAMBLOCK_FOREACH(block) { |
432d268c JN |
2388 | /* This case happens when the block is not mapped. */ | |
2389 | if (block->host == NULL) { | |
2390 | continue; | |
2391 | } | |
9b8424d5 | 2392 | if (host - block->host < block->max_length) { |
23887b79 | 2393 | goto found; |
f471a17e | 2394 | } |
94a6b54f | 2395 | } |
432d268c | 2396 | |
1b5ec234 | 2397 | return NULL; |
23887b79 PB |
2398 | |
2399 | found: | |
422148d3 DDAG |
2400 | *offset = (host - block->host); |
2401 | if (round_offset) { | |
2402 | *offset &= TARGET_PAGE_MASK; | |
2403 | } | |
422148d3 DDAG |
2404 | return block; |
2405 | } | |
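/*
 * Hypothetical caller, compiled out: translating a host pointer back to
 * its RAMBlock and offset, e.g. for diagnostics.
 */
#if 0
static void toy_describe_host_ptr(void *host_ptr)
{
    ram_addr_t offset;
    RAMBlock *rb;

    RCU_READ_LOCK_GUARD();
    rb = qemu_ram_block_from_host(host_ptr, false, &offset);
    if (rb) {
        qemu_printf("%p is in RAMBlock \"%s\" at offset 0x" RAM_ADDR_FMT "\n",
                    host_ptr, qemu_ram_get_idstr(rb), offset);
    }
}
#endif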
2406 | ||
e3dd7493 DDAG |
2407 | /* |
2408 | * Finds the named RAMBlock | |
2409 | * | |
2410 | * name: The name of RAMBlock to find | |
2411 | * | |
2412 | * Returns: RAMBlock (or NULL if not found) | |
2413 | */ | |
2414 | RAMBlock *qemu_ram_block_by_name(const char *name) | |
2415 | { | |
2416 | RAMBlock *block; | |
2417 | ||
99e15582 | 2418 | RAMBLOCK_FOREACH(block) { |
e3dd7493 DDAG |
2419 | if (!strcmp(name, block->idstr)) { |
2420 | return block; | |
2421 | } | |
2422 | } | |
2423 | ||
2424 | return NULL; | |
2425 | } | |
2426 | ||
422148d3 DDAG |
2427 | /* Some of the softmmu routines need to translate from a host pointer |
2428 | (typically a TLB entry) back to a ram offset. */ | |
07bdaa41 | 2429 | ram_addr_t qemu_ram_addr_from_host(void *ptr) |
422148d3 DDAG |
2430 | { |
2431 | RAMBlock *block; | |
f615f396 | 2432 | ram_addr_t offset; |
422148d3 | 2433 | |
f615f396 | 2434 | block = qemu_ram_block_from_host(ptr, false, &offset); |
422148d3 | 2435 | if (!block) { |
07bdaa41 | 2436 | return RAM_ADDR_INVALID; |
422148d3 DDAG |
2437 | } |
2438 | ||
07bdaa41 | 2439 | return block->offset + offset; |
e890261f | 2440 | } |
f471a17e | 2441 | |
b2a44fca | 2442 | static MemTxResult flatview_read(FlatView *fv, hwaddr addr, |
a152be43 | 2443 | MemTxAttrs attrs, void *buf, hwaddr len); |
16620684 | 2444 | static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, |
a152be43 | 2445 | const void *buf, hwaddr len); |
0c249ff7 | 2446 | static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, |
eace72b7 | 2447 | bool is_write, MemTxAttrs attrs); |
16620684 | 2448 | |
f25a49e0 PM |
2449 | static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, |
2450 | unsigned len, MemTxAttrs attrs) | |
db7b5426 | 2451 | { |
acc9d80b | 2452 | subpage_t *subpage = opaque; |
ff6cff75 | 2453 | uint8_t buf[8]; |
5c9eb028 | 2454 | MemTxResult res; |
791af8c8 | 2455 | |
db7b5426 | 2456 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2457 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, |
acc9d80b | 2458 | subpage, len, addr); |
db7b5426 | 2459 | #endif |
16620684 | 2460 | res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len); |
5c9eb028 PM |
2461 | if (res) { |
2462 | return res; | |
f25a49e0 | 2463 | } |
6d3ede54 PM |
2464 | *data = ldn_p(buf, len); |
2465 | return MEMTX_OK; | |
db7b5426 BS |
2466 | } |
2467 | ||
f25a49e0 PM |
2468 | static MemTxResult subpage_write(void *opaque, hwaddr addr, |
2469 | uint64_t value, unsigned len, MemTxAttrs attrs) | |
db7b5426 | 2470 | { |
acc9d80b | 2471 | subpage_t *subpage = opaque; |
ff6cff75 | 2472 | uint8_t buf[8]; |
acc9d80b | 2473 | |
db7b5426 | 2474 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2475 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx |
acc9d80b JK |
2476 | " value %"PRIx64"\n", |
2477 | __func__, subpage, len, addr, value); | |
db7b5426 | 2478 | #endif |
6d3ede54 | 2479 | stn_p(buf, len, value); |
16620684 | 2480 | return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len); |
db7b5426 BS |
2481 | } |
2482 | ||
c353e4cc | 2483 | static bool subpage_accepts(void *opaque, hwaddr addr, |
8372d383 PM |
2484 | unsigned len, bool is_write, |
2485 | MemTxAttrs attrs) | |
c353e4cc | 2486 | { |
acc9d80b | 2487 | subpage_t *subpage = opaque; |
c353e4cc | 2488 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2489 | printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", |
acc9d80b | 2490 | __func__, subpage, is_write ? 'w' : 'r', len, addr); |
c353e4cc PB |
2491 | #endif |
2492 | ||
16620684 | 2493 | return flatview_access_valid(subpage->fv, addr + subpage->base, |
eace72b7 | 2494 | len, is_write, attrs); |
c353e4cc PB |
2495 | } |
2496 | ||
70c68e44 | 2497 | static const MemoryRegionOps subpage_ops = { |
f25a49e0 PM |
2498 | .read_with_attrs = subpage_read, |
2499 | .write_with_attrs = subpage_write, | |
ff6cff75 PB |
2500 | .impl.min_access_size = 1, |
2501 | .impl.max_access_size = 8, | |
2502 | .valid.min_access_size = 1, | |
2503 | .valid.max_access_size = 8, | |
c353e4cc | 2504 | .valid.accepts = subpage_accepts, |
70c68e44 | 2505 | .endianness = DEVICE_NATIVE_ENDIAN, |
db7b5426 BS |
2506 | }; |
2507 | ||
b797ab1a WY |
2508 | static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, |
2509 | uint16_t section) | |
db7b5426 BS |
2510 | { |
2511 | int idx, eidx; | |
2512 | ||
2513 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) | |
2514 | return -1; | |
2515 | idx = SUBPAGE_IDX(start); | |
2516 | eidx = SUBPAGE_IDX(end); | |
2517 | #if defined(DEBUG_SUBPAGE) | |
016e9d62 AK |
2518 | printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", |
2519 | __func__, mmio, start, end, idx, eidx, section); | |
db7b5426 | 2520 | #endif |
db7b5426 | 2521 | for (; idx <= eidx; idx++) { |
5312bd8b | 2522 | mmio->sub_section[idx] = section; |
db7b5426 BS |
2523 | } |
2524 | ||
2525 | return 0; | |
2526 | } | |
2527 | ||
16620684 | 2528 | static subpage_t *subpage_init(FlatView *fv, hwaddr base) |
db7b5426 | 2529 | { |
c227f099 | 2530 | subpage_t *mmio; |
db7b5426 | 2531 | |
b797ab1a | 2532 | /* mmio->sub_section is set to PHYS_SECTION_UNASSIGNED with g_malloc0 */ |
2615fabd | 2533 | mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); |
16620684 | 2534 | mmio->fv = fv; |
1eec614b | 2535 | mmio->base = base; |
2c9b15ca | 2536 | memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, |
b4fefef9 | 2537 | NULL, TARGET_PAGE_SIZE); |
b3b00c78 | 2538 | mmio->iomem.subpage = true; |
db7b5426 | 2539 | #if defined(DEBUG_SUBPAGE) |
016e9d62 AK |
2540 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, |
2541 | mmio, base, TARGET_PAGE_SIZE); | |
db7b5426 | 2542 | #endif |
db7b5426 BS |
2543 | |
2544 | return mmio; | |
2545 | } | |
2546 | ||
16620684 | 2547 | static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr) |
5312bd8b | 2548 | { |
16620684 | 2549 | assert(fv); |
5312bd8b | 2550 | MemoryRegionSection section = { |
16620684 | 2551 | .fv = fv, |
5312bd8b AK |
2552 | .mr = mr, |
2553 | .offset_within_address_space = 0, | |
2554 | .offset_within_region = 0, | |
052e87b0 | 2555 | .size = int128_2_64(), |
5312bd8b AK |
2556 | }; |
2557 | ||
53cb28cb | 2558 | return phys_section_add(map, §ion); |
5312bd8b AK |
2559 | } |
2560 | ||
2d54f194 PM |
2561 | MemoryRegionSection *iotlb_to_section(CPUState *cpu, |
2562 | hwaddr index, MemTxAttrs attrs) | |
aa102231 | 2563 | { |
a54c87b6 PM |
2564 | int asidx = cpu_asidx_from_attrs(cpu, attrs); |
2565 | CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; | |
d73415a3 | 2566 | AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch); |
79e2b9ae | 2567 | MemoryRegionSection *sections = d->map.sections; |
9d82b5a7 | 2568 | |
2d54f194 | 2569 | return §ions[index & ~TARGET_PAGE_MASK]; |
aa102231 AK |
2570 | } |
2571 | ||
e9179ce1 AK |
2572 | static void io_mem_init(void) |
2573 | { | |
2c9b15ca | 2574 | memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, |
1f6245e5 | 2575 | NULL, UINT64_MAX); |
e9179ce1 AK |
2576 | } |
2577 | ||
8629d3fc | 2578 | AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv) |
00752703 | 2579 | { |
53cb28cb MA |
2580 | AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); |
2581 | uint16_t n; | |
2582 | ||
16620684 | 2583 | n = dummy_section(&d->map, fv, &io_mem_unassigned); |
53cb28cb | 2584 | assert(n == PHYS_SECTION_UNASSIGNED); |
00752703 | 2585 | |
9736e55b | 2586 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; |
66a6df1d AK |
2587 | |
2588 | return d; | |
00752703 PB |
2589 | } |
2590 | ||
66a6df1d | 2591 | void address_space_dispatch_free(AddressSpaceDispatch *d) |
79e2b9ae PB |
2592 | { |
2593 | phys_sections_free(&d->map); | |
2594 | g_free(d); | |
2595 | } | |
2596 | ||
9458a9a1 PB |
2597 | static void do_nothing(CPUState *cpu, run_on_cpu_data d) |
2598 | { | |
2599 | } | |
2600 | ||
2601 | static void tcg_log_global_after_sync(MemoryListener *listener) | |
2602 | { | |
2603 | CPUAddressSpace *cpuas; | |
2604 | ||
2605 | /* Wait for the CPU to end the current TB. This avoids the following | |
2606 | * incorrect race: | |
2607 | * | |
2608 | * vCPU migration | |
2609 | * ---------------------- ------------------------- | |
2610 | * TLB check -> slow path | |
2611 | * notdirty_mem_write | |
2612 | * write to RAM | |
2613 | * mark dirty | |
2614 | * clear dirty flag | |
2615 | * TLB check -> fast path | |
2616 | * read memory | |
2617 | * write to RAM | |
2618 | * | |
2619 | * by pushing the migration thread's memory read after the vCPU thread has | |
2620 | * written the memory. | |
2621 | */ | |
86cf9e15 PD |
2622 | if (replay_mode == REPLAY_MODE_NONE) { |
2623 | /* | |
2624 | * VGA can make calls to this function while updating the screen. | |
2625 | * In record/replay mode this causes a deadlock, because | |
2626 | * run_on_cpu waits for rr mutex. Therefore no races are possible | |
2627 | * in this case and no need for making run_on_cpu when | |
2628 | * record/replay is not enabled. | |
2629 | */ | |
2630 | cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); | |
2631 | run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); | |
2632 | } | |
9458a9a1 PB |
2633 | } |
2634 | ||
1d71148e | 2635 | static void tcg_commit(MemoryListener *listener) |
50c1e149 | 2636 | { |
32857f4d PM |
2637 | CPUAddressSpace *cpuas; |
2638 | AddressSpaceDispatch *d; | |
117712c3 | 2639 | |
f28d0dfd | 2640 | assert(tcg_enabled()); |
117712c3 AK |
2641 | /* since each CPU stores ram addresses in its TLB cache, we must |
2642 | reset the modified entries */ | |
32857f4d PM |
2643 | cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); |
2644 | cpu_reloading_memory_map(); | |
2645 | /* The CPU and TLB are protected by the iothread lock. | |
2646 | * We reload the dispatch pointer now because cpu_reloading_memory_map() | |
2647 | * may have split the RCU critical section. | |
2648 | */ | |
66a6df1d | 2649 | d = address_space_to_dispatch(cpuas->as); |
d73415a3 | 2650 | qatomic_rcu_set(&cpuas->memory_dispatch, d); |
d10eb08f | 2651 | tlb_flush(cpuas->cpu); |
50c1e149 AK |
2652 | } |
2653 | ||
62152b8a AK |
2654 | static void memory_map_init(void) |
2655 | { | |
7267c094 | 2656 | system_memory = g_malloc(sizeof(*system_memory)); |
03f49957 | 2657 | |
57271d63 | 2658 | memory_region_init(system_memory, NULL, "system", UINT64_MAX); |
7dca8043 | 2659 | address_space_init(&address_space_memory, system_memory, "memory"); |
309cb471 | 2660 | |
7267c094 | 2661 | system_io = g_malloc(sizeof(*system_io)); |
3bb28b72 JK |
2662 | memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", |
2663 | 65536); | |
7dca8043 | 2664 | address_space_init(&address_space_io, system_io, "I/O"); |
62152b8a AK |
2665 | } |
2666 | ||
2667 | MemoryRegion *get_system_memory(void) | |
2668 | { | |
2669 | return system_memory; | |
2670 | } | |
2671 | ||
309cb471 AK |
2672 | MemoryRegion *get_system_io(void) |
2673 | { | |
2674 | return system_io; | |
2675 | } | |
2676 | ||
845b6214 | 2677 | static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, |
a8170e5e | 2678 | hwaddr length) |
51d7a9eb | 2679 | { |
e87f7778 | 2680 | uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); |
0878d0e1 PB |
2681 | addr += memory_region_get_ram_addr(mr); |
2682 | ||
e87f7778 PB |
2683 | /* No early return if dirty_log_mask is or becomes 0, because |
2684 | * cpu_physical_memory_set_dirty_range will still call | |
2685 | * xen_modified_memory. | |
2686 | */ | |
2687 | if (dirty_log_mask) { | |
2688 | dirty_log_mask = | |
2689 | cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); | |
2690 | } | |
2691 | if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { | |
5aa1ef71 | 2692 | assert(tcg_enabled()); |
e87f7778 PB |
2693 | tb_invalidate_phys_range(addr, addr + length); |
2694 | dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); | |
51d7a9eb | 2695 | } |
e87f7778 | 2696 | cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); |
51d7a9eb AP |
2697 | } |
2698 | ||
047be4ed SH |
2699 | void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) |
2700 | { | |
2701 | /* | |
2702 | * In principle this function would work on other memory region types too, | |
2703 | * but the ROM device use case is the only one where this operation is | |
2704 | * necessary. Other memory regions should use the | |
2705 | * address_space_read/write() APIs. | |
2706 | */ | |
2707 | assert(memory_region_is_romd(mr)); | |
2708 | ||
2709 | invalidate_and_set_dirty(mr, addr, size); | |
2710 | } | |
2711 | ||
23326164 | 2712 | static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) |
82f2563f | 2713 | { |
e1622f4b | 2714 | unsigned access_size_max = mr->ops->valid.max_access_size; |
23326164 RH |
2715 | |
2716 | /* Regions are assumed to support 1-4 byte accesses unless | |
2717 | otherwise specified. */ | |
23326164 RH |
2718 | if (access_size_max == 0) { |
2719 | access_size_max = 4; | |
2720 | } | |
2721 | ||
2722 | /* Bound the maximum access by the alignment of the address. */ | |
2723 | if (!mr->ops->impl.unaligned) { | |
2724 | unsigned align_size_max = addr & -addr; | |
2725 | if (align_size_max != 0 && align_size_max < access_size_max) { | |
2726 | access_size_max = align_size_max; | |
2727 | } | |
82f2563f | 2728 | } |
23326164 RH |
2729 | |
2730 | /* Don't attempt accesses larger than the maximum. */ | |
2731 | if (l > access_size_max) { | |
2732 | l = access_size_max; | |
82f2563f | 2733 | } |
6554f5c0 | 2734 | l = pow2floor(l); |
23326164 RH |
2735 | |
2736 | return l; | |
82f2563f PB |
2737 | } |
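/*
 * Illustrative note, compiled out: "addr & -addr" isolates the lowest
 * set bit of addr, i.e. the largest power of two the address is aligned
 * to, which is what bounds the access size above.
 */
#if 0
static void toy_check_align_trick(void)
{
    assert((0x1000u & -0x1000u) == 0x1000u);   /* 4 KiB aligned */
    assert((0x1004u & -0x1004u) == 0x4u);      /* only 4-byte aligned */
    assert((0x1001u & -0x1001u) == 0x1u);      /* odd address: 1 byte */
}
#endif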
2738 | ||
4840f10e | 2739 | static bool prepare_mmio_access(MemoryRegion *mr) |
125b3806 | 2740 | { |
4840f10e JK |
2741 | bool release_lock = false; |
2742 | ||
37921851 | 2743 | if (!qemu_mutex_iothread_locked()) { |
4840f10e | 2744 | qemu_mutex_lock_iothread(); |
4840f10e JK |
2745 | release_lock = true; |
2746 | } | |
125b3806 PB |
2747 | if (mr->flush_coalesced_mmio) { |
2748 | qemu_flush_coalesced_mmio_buffer(); | |
2749 | } | |
4840f10e JK |
2750 | |
2751 | return release_lock; | |
125b3806 PB |
2752 | } |
2753 | ||
a203ac70 | 2754 | /* Called within RCU critical section. */ |
16620684 AK |
2755 | static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, |
2756 | MemTxAttrs attrs, | |
a152be43 | 2757 | const void *ptr, |
0c249ff7 | 2758 | hwaddr len, hwaddr addr1, |
16620684 | 2759 | hwaddr l, MemoryRegion *mr) |
13eb76e0 | 2760 | { |
20804676 | 2761 | uint8_t *ram_ptr; |
791af8c8 | 2762 | uint64_t val; |
3b643495 | 2763 | MemTxResult result = MEMTX_OK; |
4840f10e | 2764 | bool release_lock = false; |
a152be43 | 2765 | const uint8_t *buf = ptr; |
3b46e624 | 2766 | |
a203ac70 | 2767 | for (;;) { |
eb7eeb88 PB |
2768 | if (!memory_access_is_direct(mr, true)) { |
2769 | release_lock |= prepare_mmio_access(mr); | |
2770 | l = memory_access_size(mr, l, addr1); | |
2771 | /* XXX: could force current_cpu to NULL to avoid | |
2772 | potential bugs */ | |
9bf825bf | 2773 | val = ldn_he_p(buf, l); |
3d9e7c3e | 2774 | result |= memory_region_dispatch_write(mr, addr1, val, |
9bf825bf | 2775 | size_memop(l), attrs); |
13eb76e0 | 2776 | } else { |
eb7eeb88 | 2777 | /* RAM case */ |
20804676 PMD |
2778 | ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); |
2779 | memcpy(ram_ptr, buf, l); | |
eb7eeb88 | 2780 | invalidate_and_set_dirty(mr, addr1, l); |
13eb76e0 | 2781 | } |
4840f10e JK |
2782 | |
2783 | if (release_lock) { | |
2784 | qemu_mutex_unlock_iothread(); | |
2785 | release_lock = false; | |
2786 | } | |
2787 | ||
13eb76e0 FB |
2788 | len -= l; |
2789 | buf += l; | |
2790 | addr += l; | |
a203ac70 PB |
2791 | |
2792 | if (!len) { | |
2793 | break; | |
2794 | } | |
2795 | ||
2796 | l = len; | |
efa99a2f | 2797 | mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); |
13eb76e0 | 2798 | } |
fd8aaa76 | 2799 | |
3b643495 | 2800 | return result; |
13eb76e0 | 2801 | } |
8df1cd07 | 2802 | |
4c6ebbb3 | 2803 | /* Called from RCU critical section. */ |
16620684 | 2804 | static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, |
a152be43 | 2805 | const void *buf, hwaddr len) |
ac1970fb | 2806 | { |
eb7eeb88 | 2807 | hwaddr l; |
eb7eeb88 PB |
2808 | hwaddr addr1; |
2809 | MemoryRegion *mr; | |
2810 | MemTxResult result = MEMTX_OK; | |
eb7eeb88 | 2811 | |
4c6ebbb3 | 2812 | l = len; |
efa99a2f | 2813 | mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); |
4c6ebbb3 PB |
2814 | result = flatview_write_continue(fv, addr, attrs, buf, len, |
2815 | addr1, l, mr); | |
a203ac70 PB |
2816 | |
2817 | return result; | |
2818 | } | |
2819 | ||
2820 | /* Called within RCU critical section. */ | |
16620684 | 2821 | MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, |
a152be43 | 2822 | MemTxAttrs attrs, void *ptr, |
0c249ff7 | 2823 | hwaddr len, hwaddr addr1, hwaddr l, |
16620684 | 2824 | MemoryRegion *mr) |
a203ac70 | 2825 | { |
20804676 | 2826 | uint8_t *ram_ptr; |
a203ac70 PB |
2827 | uint64_t val; |
2828 | MemTxResult result = MEMTX_OK; | |
2829 | bool release_lock = false; | |
a152be43 | 2830 | uint8_t *buf = ptr; |
eb7eeb88 | 2831 | |
a203ac70 | 2832 | for (;;) { |
eb7eeb88 PB |
2833 | if (!memory_access_is_direct(mr, false)) { |
2834 | /* I/O case */ | |
2835 | release_lock |= prepare_mmio_access(mr); | |
2836 | l = memory_access_size(mr, l, addr1); | |
3d9e7c3e | 2837 | result |= memory_region_dispatch_read(mr, addr1, &val, |
9bf825bf TN |
2838 | size_memop(l), attrs); |
2839 | stn_he_p(buf, l, val); | |
eb7eeb88 PB |
2840 | } else { |
2841 | /* RAM case */ | |
fc1c8344 | 2842 | fuzz_dma_read_cb(addr, len, mr); |
20804676 PMD |
2843 | ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); |
2844 | memcpy(buf, ram_ptr, l); | |
eb7eeb88 PB |
2845 | } |
2846 | ||
2847 | if (release_lock) { | |
2848 | qemu_mutex_unlock_iothread(); | |
2849 | release_lock = false; | |
2850 | } | |
2851 | ||
2852 | len -= l; | |
2853 | buf += l; | |
2854 | addr += l; | |
a203ac70 PB |
2855 | |
2856 | if (!len) { | |
2857 | break; | |
2858 | } | |
2859 | ||
2860 | l = len; | |
efa99a2f | 2861 | mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); |
a203ac70 PB |
2862 | } |
2863 | ||
2864 | return result; | |
2865 | } | |
2866 | ||
b2a44fca PB |
2867 | /* Called from RCU critical section. */ |
2868 | static MemTxResult flatview_read(FlatView *fv, hwaddr addr, | |
a152be43 | 2869 | MemTxAttrs attrs, void *buf, hwaddr len) |
a203ac70 PB |
2870 | { |
2871 | hwaddr l; | |
2872 | hwaddr addr1; | |
2873 | MemoryRegion *mr; | |
eb7eeb88 | 2874 | |
b2a44fca | 2875 | l = len; |
efa99a2f | 2876 | mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); |
b2a44fca PB |
2877 | return flatview_read_continue(fv, addr, attrs, buf, len, |
2878 | addr1, l, mr); | |
ac1970fb AK |
2879 | } |
2880 | ||
b2a44fca | 2881 | MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, |
daa3dda4 | 2882 | MemTxAttrs attrs, void *buf, hwaddr len) |
b2a44fca PB |
2883 | { |
2884 | MemTxResult result = MEMTX_OK; | |
2885 | FlatView *fv; | |
2886 | ||
2887 | if (len > 0) { | |
694ea274 | 2888 | RCU_READ_LOCK_GUARD(); |
b2a44fca PB |
2889 | fv = address_space_to_flatview(as); |
2890 | result = flatview_read(fv, addr, attrs, buf, len); | |
b2a44fca PB |
2891 | } |
2892 | ||
2893 | return result; | |
2894 | } | |
2895 | ||
4c6ebbb3 PB |
2896 | MemTxResult address_space_write(AddressSpace *as, hwaddr addr, |
2897 | MemTxAttrs attrs, | |
daa3dda4 | 2898 | const void *buf, hwaddr len) |
4c6ebbb3 PB |
2899 | { |
2900 | MemTxResult result = MEMTX_OK; | |
2901 | FlatView *fv; | |
2902 | ||
2903 | if (len > 0) { | |
694ea274 | 2904 | RCU_READ_LOCK_GUARD(); |
4c6ebbb3 PB |
2905 | fv = address_space_to_flatview(as); |
2906 | result = flatview_write(fv, addr, attrs, buf, len); | |
4c6ebbb3 PB |
2907 | } |
2908 | ||
2909 | return result; | |
2910 | } | |
2911 | ||
db84fd97 | 2912 | MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, |
daa3dda4 | 2913 | void *buf, hwaddr len, bool is_write) |
db84fd97 PB |
2914 | { |
2915 | if (is_write) { | |
2916 | return address_space_write(as, addr, attrs, buf, len); | |
2917 | } else { | |
2918 | return address_space_read_full(as, addr, attrs, buf, len); | |
2919 | } | |
2920 | } | |
2921 | ||
d7ef71ef | 2922 | void cpu_physical_memory_rw(hwaddr addr, void *buf, |
28c80bfe | 2923 | hwaddr len, bool is_write) |
ac1970fb | 2924 | { |
5c9eb028 PM |
2925 | address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, |
2926 | buf, len, is_write); | |
ac1970fb AK |
2927 | } |
2928 | ||
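/*
 * Editorial sketch (not part of this file): the convenience entry points
 * above in use.  New code generally calls address_space_read()/write()
 * with explicit attributes; cpu_physical_memory_rw() is the legacy
 * system-memory shorthand.  "desc_addr" is an illustrative guest address.
 */
#if 0
    uint32_t v = cpu_to_le32(0x12345678);

    if (address_space_write(&address_space_memory, desc_addr,
                            MEMTXATTRS_UNSPECIFIED, &v,
                            sizeof(v)) != MEMTX_OK) {
        /* the write hit unassigned memory or was rejected by a device */
    }
#endif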
582b55a9 AG |
2929 | enum write_rom_type { |
2930 | WRITE_DATA, | |
2931 | FLUSH_CACHE, | |
2932 | }; | |
2933 | ||
75693e14 PM |
2934 | static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, |
2935 | hwaddr addr, | |
2936 | MemTxAttrs attrs, | |
daa3dda4 | 2937 | const void *ptr, |
0c249ff7 | 2938 | hwaddr len, |
75693e14 | 2939 | enum write_rom_type type) |
d0ecd2aa | 2940 | { |
149f54b5 | 2941 | hwaddr l; |
20804676 | 2942 | uint8_t *ram_ptr; |
149f54b5 | 2943 | hwaddr addr1; |
5c8a00ce | 2944 | MemoryRegion *mr; |
daa3dda4 | 2945 | const uint8_t *buf = ptr; |
3b46e624 | 2946 | |
694ea274 | 2947 | RCU_READ_LOCK_GUARD(); |
d0ecd2aa | 2948 | while (len > 0) { |
149f54b5 | 2949 | l = len; |
75693e14 | 2950 | mr = address_space_translate(as, addr, &addr1, &l, true, attrs); |
3b46e624 | 2951 | |
5c8a00ce PB |
2952 | if (!(memory_region_is_ram(mr) || |
2953 | memory_region_is_romd(mr))) { | |
b242e0e0 | 2954 | l = memory_access_size(mr, l, addr1); |
d0ecd2aa | 2955 | } else { |
d0ecd2aa | 2956 | /* ROM/RAM case */ |
20804676 | 2957 | ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
582b55a9 AG |
2958 | switch (type) { |
2959 | case WRITE_DATA: | |
20804676 | 2960 | memcpy(ram_ptr, buf, l); |
845b6214 | 2961 | invalidate_and_set_dirty(mr, addr1, l); |
582b55a9 AG |
2962 | break; |
2963 | case FLUSH_CACHE: | |
1da8de39 | 2964 | flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); |
582b55a9 AG |
2965 | break; |
2966 | } | |
d0ecd2aa FB |
2967 | } |
2968 | len -= l; | |
2969 | buf += l; | |
2970 | addr += l; | |
2971 | } | |
75693e14 | 2972 | return MEMTX_OK; |
d0ecd2aa FB |
2973 | } |
2974 | ||
582b55a9 | 2975 | /* used for ROM loading: can write to RAM and ROM */
3c8133f9 PM |
2976 | MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, |
2977 | MemTxAttrs attrs, | |
daa3dda4 | 2978 | const void *buf, hwaddr len) |
582b55a9 | 2979 | { |
3c8133f9 PM |
2980 | return address_space_write_rom_internal(as, addr, attrs, |
2981 | buf, len, WRITE_DATA); | |
582b55a9 AG |
2982 | } |
2983 | ||
0c249ff7 | 2984 | void cpu_flush_icache_range(hwaddr start, hwaddr len) |
582b55a9 AG |
2985 | { |
2986 | /* | |
2987 | * This function should do the same thing as an icache flush that was | |
2988 | * triggered from within the guest. For TCG we are always cache coherent, | |
2989 | * so there is no need to flush anything. For KVM / Xen we need to flush | |
2990 | * the host's instruction cache at least. | |
2991 | */ | |
2992 | if (tcg_enabled()) { | |
2993 | return; | |
2994 | } | |
2995 | ||
75693e14 PM |
2996 | address_space_write_rom_internal(&address_space_memory, |
2997 | start, MEMTXATTRS_UNSPECIFIED, | |
2998 | NULL, len, FLUSH_CACHE); | |
582b55a9 AG |
2999 | } |
3000 | ||
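/*
 * Editorial sketch (not part of this file): the usual pairing of the two
 * helpers above is a firmware/kernel loader writing a blob into guest
 * memory and then flushing the host icache over that range for the
 * benefit of KVM/Xen.  "blob", "size" and "load_addr" are illustrative.
 */
#if 0
    address_space_write_rom(&address_space_memory, load_addr,
                            MEMTXATTRS_UNSPECIFIED, blob, size);
    cpu_flush_icache_range(load_addr, size);
#endif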
6d16c2f8 | 3001 | typedef struct { |
d3e71559 | 3002 | MemoryRegion *mr; |
6d16c2f8 | 3003 | void *buffer; |
a8170e5e AK |
3004 | hwaddr addr; |
3005 | hwaddr len; | |
c2cba0ff | 3006 | bool in_use; |
6d16c2f8 AL |
3007 | } BounceBuffer; |
3008 | ||
3009 | static BounceBuffer bounce; | |
3010 | ||
ba223c29 | 3011 | typedef struct MapClient { |
e95205e1 | 3012 | QEMUBH *bh; |
72cf2d4f | 3013 | QLIST_ENTRY(MapClient) link; |
ba223c29 AL |
3014 | } MapClient; |
3015 | ||
38e047b5 | 3016 | QemuMutex map_client_list_lock; |
b58deb34 | 3017 | static QLIST_HEAD(, MapClient) map_client_list |
72cf2d4f | 3018 | = QLIST_HEAD_INITIALIZER(map_client_list); |
ba223c29 | 3019 | |
e95205e1 FZ |
3020 | static void cpu_unregister_map_client_do(MapClient *client) |
3021 | { | |
3022 | QLIST_REMOVE(client, link); | |
3023 | g_free(client); | |
3024 | } | |
3025 | ||
33b6c2ed FZ |
3026 | static void cpu_notify_map_clients_locked(void) |
3027 | { | |
3028 | MapClient *client; | |
3029 | ||
3030 | while (!QLIST_EMPTY(&map_client_list)) { | |
3031 | client = QLIST_FIRST(&map_client_list); | |
e95205e1 FZ |
3032 | qemu_bh_schedule(client->bh); |
3033 | cpu_unregister_map_client_do(client); | |
33b6c2ed FZ |
3034 | } |
3035 | } | |
3036 | ||
e95205e1 | 3037 | void cpu_register_map_client(QEMUBH *bh) |
ba223c29 | 3038 | { |
7267c094 | 3039 | MapClient *client = g_malloc(sizeof(*client)); |
ba223c29 | 3040 | |
38e047b5 | 3041 | qemu_mutex_lock(&map_client_list_lock); |
e95205e1 | 3042 | client->bh = bh; |
72cf2d4f | 3043 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
d73415a3 | 3044 | if (!qatomic_read(&bounce.in_use)) { |
33b6c2ed FZ |
3045 | cpu_notify_map_clients_locked(); |
3046 | } | |
38e047b5 | 3047 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
3048 | } |
3049 | ||
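/*
 * Editorial sketch (not part of this file): how a caller typically uses
 * the map-client API above.  If address_space_map() fails because the
 * single bounce buffer is busy, register a bottom half and retry from it
 * once cpu_notify_map_clients() fires.  "MyDMAState" and its fields are
 * hypothetical names for illustration only.
 */
#if 0
static void retry_dma_bh(void *opaque)
{
    MyDMAState *s = opaque;            /* hypothetical device state */
    hwaddr len = s->len;
    void *p = address_space_map(s->as, s->addr, &len, true,
                                MEMTXATTRS_UNSPECIFIED);

    if (!p) {
        /* Bounce buffer still in use: re-register and wait for the next
         * cpu_notify_map_clients() to reschedule this bottom half. */
        cpu_register_map_client(s->bh);
        return;
    }
    /* ... transfer up to len bytes (may be less than requested) ... */
    address_space_unmap(s->as, p, len, true, len);
}
#endif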
38e047b5 | 3050 | void cpu_exec_init_all(void) |
ba223c29 | 3051 | { |
38e047b5 | 3052 | qemu_mutex_init(&ram_list.mutex); |
20bccb82 PM |
3053 | /* The data structures we set up here depend on knowing the page size, |
3054 | * so no more changes can be made after this point. | |
3055 | * In an ideal world, nothing we did before we had finished the | |
3056 | * machine setup would care about the target page size, and we could | |
3057 | * do this much later, rather than requiring board models to state | |
3058 | * up front what their requirements are. | |
3059 | */ | |
3060 | finalize_target_page_bits(); | |
38e047b5 | 3061 | io_mem_init(); |
680a4783 | 3062 | memory_map_init(); |
38e047b5 | 3063 | qemu_mutex_init(&map_client_list_lock); |
ba223c29 AL |
3064 | } |
3065 | ||
e95205e1 | 3066 | void cpu_unregister_map_client(QEMUBH *bh) |
ba223c29 AL |
3067 | { |
3068 | MapClient *client; | |
3069 | ||
e95205e1 FZ |
3070 | qemu_mutex_lock(&map_client_list_lock); |
3071 | QLIST_FOREACH(client, &map_client_list, link) { | |
3072 | if (client->bh == bh) { | |
3073 | cpu_unregister_map_client_do(client); | |
3074 | break; | |
3075 | } | |
ba223c29 | 3076 | } |
e95205e1 | 3077 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
3078 | } |
3079 | ||
3080 | static void cpu_notify_map_clients(void) | |
3081 | { | |
38e047b5 | 3082 | qemu_mutex_lock(&map_client_list_lock); |
33b6c2ed | 3083 | cpu_notify_map_clients_locked(); |
38e047b5 | 3084 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
3085 | } |
3086 | ||
0c249ff7 | 3087 | static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, |
eace72b7 | 3088 | bool is_write, MemTxAttrs attrs) |
51644ab7 | 3089 | { |
5c8a00ce | 3090 | MemoryRegion *mr; |
51644ab7 PB |
3091 | hwaddr l, xlat; |
3092 | ||
3093 | while (len > 0) { | |
3094 | l = len; | |
efa99a2f | 3095 | mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); |
5c8a00ce PB |
3096 | if (!memory_access_is_direct(mr, is_write)) { |
3097 | l = memory_access_size(mr, l, addr); | |
eace72b7 | 3098 | if (!memory_region_access_valid(mr, xlat, l, is_write, attrs)) { |
51644ab7 PB |
3099 | return false; |
3100 | } | |
3101 | } | |
3102 | ||
3103 | len -= l; | |
3104 | addr += l; | |
3105 | } | |
3106 | return true; | |
3107 | } | |
3108 | ||
16620684 | 3109 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, |
0c249ff7 | 3110 | hwaddr len, bool is_write, |
fddffa42 | 3111 | MemTxAttrs attrs) |
16620684 | 3112 | { |
11e732a5 PB |
3113 | FlatView *fv; |
3114 | bool result; | |
3115 | ||
694ea274 | 3116 | RCU_READ_LOCK_GUARD(); |
11e732a5 | 3117 | fv = address_space_to_flatview(as); |
eace72b7 | 3118 | result = flatview_access_valid(fv, addr, len, is_write, attrs); |
11e732a5 | 3119 | return result; |
16620684 AK |
3120 | } |
3121 | ||
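/*
 * Editorial sketch (not part of this file): address_space_access_valid()
 * lets a caller probe a transfer before issuing it, failing a request
 * cleanly instead of decoding MEMTX_* errors afterwards.  "buf" and
 * "len" are illustrative.
 */
#if 0
    if (!address_space_access_valid(&address_space_memory, addr, len,
                                    false, MEMTXATTRS_UNSPECIFIED)) {
        return -EINVAL;                /* nothing claims this range */
    }
    address_space_read(&address_space_memory, addr,
                       MEMTXATTRS_UNSPECIFIED, buf, len);
#endif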
715c31ec | 3122 | static hwaddr |
16620684 | 3123 | flatview_extend_translation(FlatView *fv, hwaddr addr, |
53d0790d PM |
3124 | hwaddr target_len, |
3125 | MemoryRegion *mr, hwaddr base, hwaddr len, | |
3126 | bool is_write, MemTxAttrs attrs) | |
715c31ec PB |
3127 | { |
3128 | hwaddr done = 0; | |
3129 | hwaddr xlat; | |
3130 | MemoryRegion *this_mr; | |
3131 | ||
3132 | for (;;) { | |
3133 | target_len -= len; | |
3134 | addr += len; | |
3135 | done += len; | |
3136 | if (target_len == 0) { | |
3137 | return done; | |
3138 | } | |
3139 | ||
3140 | len = target_len; | |
16620684 | 3141 | this_mr = flatview_translate(fv, addr, &xlat, |
efa99a2f | 3142 | &len, is_write, attrs); |
715c31ec PB |
3143 | if (this_mr != mr || xlat != base + done) { |
3144 | return done; | |
3145 | } | |
3146 | } | |
3147 | } | |
3148 | ||
6d16c2f8 AL |
3149 | /* Map a physical memory region into a host virtual address. |
3150 | * May map a subset of the requested range, given by and returned in *plen. | |
3151 | * May return NULL if resources needed to perform the mapping are exhausted. | |
3152 | * Use only for reads OR writes - not for read-modify-write operations. | |
ba223c29 AL |
3153 | * Use cpu_register_map_client() to know when retrying the map operation is |
3154 | * likely to succeed. | |
6d16c2f8 | 3155 | */ |
ac1970fb | 3156 | void *address_space_map(AddressSpace *as, |
a8170e5e AK |
3157 | hwaddr addr, |
3158 | hwaddr *plen, | |
f26404fb PM |
3159 | bool is_write, |
3160 | MemTxAttrs attrs) | |
6d16c2f8 | 3161 | { |
a8170e5e | 3162 | hwaddr len = *plen; |
715c31ec PB |
3163 | hwaddr l, xlat; |
3164 | MemoryRegion *mr; | |
e81bcda5 | 3165 | void *ptr; |
ad0c60fa | 3166 | FlatView *fv; |
6d16c2f8 | 3167 | |
e3127ae0 PB |
3168 | if (len == 0) { |
3169 | return NULL; | |
3170 | } | |
38bee5dc | 3171 | |
e3127ae0 | 3172 | l = len; |
694ea274 | 3173 | RCU_READ_LOCK_GUARD(); |
ad0c60fa | 3174 | fv = address_space_to_flatview(as); |
efa99a2f | 3175 | mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); |
41063e1e | 3176 | |
e3127ae0 | 3177 | if (!memory_access_is_direct(mr, is_write)) { |
d73415a3 | 3178 | if (qatomic_xchg(&bounce.in_use, true)) { |
77f55eac | 3179 | *plen = 0; |
e3127ae0 | 3180 | return NULL; |
6d16c2f8 | 3181 | } |
e85d9db5 KW |
3182 | /* Avoid unbounded allocations */ |
3183 | l = MIN(l, TARGET_PAGE_SIZE); | |
3184 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); | |
e3127ae0 PB |
3185 | bounce.addr = addr; |
3186 | bounce.len = l; | |
d3e71559 PB |
3187 | |
3188 | memory_region_ref(mr); | |
3189 | bounce.mr = mr; | |
e3127ae0 | 3190 | if (!is_write) { |
16620684 | 3191 | flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, |
5c9eb028 | 3192 | bounce.buffer, l); |
8ab934f9 | 3193 | } |
6d16c2f8 | 3194 | |
e3127ae0 PB |
3195 | *plen = l; |
3196 | return bounce.buffer; | |
3197 | } | |
3198 | ||
e3127ae0 | 3199 | |
d3e71559 | 3200 | memory_region_ref(mr); |
16620684 | 3201 | *plen = flatview_extend_translation(fv, addr, len, mr, xlat, |
53d0790d | 3202 | l, is_write, attrs); |
fc1c8344 | 3203 | fuzz_dma_read_cb(addr, *plen, mr); |
f5aa69bd | 3204 | ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true); |
e81bcda5 PB |
3205 | |
3206 | return ptr; | |
6d16c2f8 AL |
3207 | } |
3208 | ||
ac1970fb | 3209 | /* Unmaps a memory region previously mapped by address_space_map(). |
ae5883ab | 3210 | * Will also mark the memory as dirty if is_write is true. access_len gives |
6d16c2f8 AL |
3211 | * the amount of memory that was actually read or written by the caller. |
3212 | */ | |
a8170e5e | 3213 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
ae5883ab | 3214 | bool is_write, hwaddr access_len) |
6d16c2f8 AL |
3215 | { |
3216 | if (buffer != bounce.buffer) { | |
d3e71559 PB |
3217 | MemoryRegion *mr; |
3218 | ram_addr_t addr1; | |
3219 | ||
07bdaa41 | 3220 | mr = memory_region_from_host(buffer, &addr1); |
d3e71559 | 3221 | assert(mr != NULL); |
6d16c2f8 | 3222 | if (is_write) { |
845b6214 | 3223 | invalidate_and_set_dirty(mr, addr1, access_len); |
6d16c2f8 | 3224 | } |
868bb33f | 3225 | if (xen_enabled()) { |
e41d7c69 | 3226 | xen_invalidate_map_cache_entry(buffer); |
050a0ddf | 3227 | } |
d3e71559 | 3228 | memory_region_unref(mr); |
6d16c2f8 AL |
3229 | return; |
3230 | } | |
3231 | if (is_write) { | |
5c9eb028 PM |
3232 | address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, |
3233 | bounce.buffer, access_len); | |
6d16c2f8 | 3234 | } |
f8a83245 | 3235 | qemu_vfree(bounce.buffer); |
6d16c2f8 | 3236 | bounce.buffer = NULL; |
d3e71559 | 3237 | memory_region_unref(bounce.mr); |
d73415a3 | 3238 | qatomic_mb_set(&bounce.in_use, false); |
ba223c29 | 3239 | cpu_notify_map_clients(); |
6d16c2f8 | 3240 | } |
d0ecd2aa | 3241 | |
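/*
 * Editorial sketch (not part of this file): the zero-copy pattern the two
 * functions above enable.  *plen can come back smaller than requested, so
 * real callers loop over the remainder; dma_memory_map() in
 * include/sysemu/dma.h wraps essentially this sequence.
 */
#if 0
    hwaddr mlen = len;
    void *p = address_space_map(as, addr, &mlen, is_write,
                                MEMTXATTRS_UNSPECIFIED);

    if (p) {
        /* touch at most mlen bytes of guest memory directly ... */
        address_space_unmap(as, p, mlen, is_write, mlen);
    }
#endif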
a8170e5e AK |
3242 | void *cpu_physical_memory_map(hwaddr addr, |
3243 | hwaddr *plen, | |
28c80bfe | 3244 | bool is_write) |
ac1970fb | 3245 | { |
f26404fb PM |
3246 | return address_space_map(&address_space_memory, addr, plen, is_write, |
3247 | MEMTXATTRS_UNSPECIFIED); | |
ac1970fb AK |
3248 | } |
3249 | ||
a8170e5e | 3250 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
28c80bfe | 3251 | bool is_write, hwaddr access_len) |
ac1970fb AK |
3252 | { |
3253 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); | |
3254 | } | |
3255 | ||
0ce265ff PB |
3256 | #define ARG1_DECL AddressSpace *as |
3257 | #define ARG1 as | |
3258 | #define SUFFIX | |
3259 | #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) | |
0ce265ff PB |
3260 | #define RCU_READ_LOCK(...) rcu_read_lock() |
3261 | #define RCU_READ_UNLOCK(...) rcu_read_unlock() | |
139c1837 | 3262 | #include "memory_ldst.c.inc" |
1e78bcc1 | 3263 | |
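/*
 * Editorial note: with ARG1/SUFFIX defined as above, the include expands
 * into the fixed-size accessors (address_space_ldub, lduw/ldl/ldq and the
 * corresponding stores, each with _le/_be variants), e.g.:
 *
 *   uint32_t v = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED,
 *                                     NULL);
 *
 * The trailing MemTxResult pointer may be NULL if the caller does not
 * care about the transaction outcome.
 */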
1f4e496e PB |
3264 | int64_t address_space_cache_init(MemoryRegionCache *cache, |
3265 | AddressSpace *as, | |
3266 | hwaddr addr, | |
3267 | hwaddr len, | |
3268 | bool is_write) | |
3269 | { | |
48564041 PB |
3270 | AddressSpaceDispatch *d; |
3271 | hwaddr l; | |
3272 | MemoryRegion *mr; | |
4bfb024b | 3273 | Int128 diff; |
48564041 PB |
3274 | |
3275 | assert(len > 0); | |
3276 | ||
3277 | l = len; | |
3278 | cache->fv = address_space_get_flatview(as); | |
3279 | d = flatview_to_dispatch(cache->fv); | |
3280 | cache->mrs = *address_space_translate_internal(d, addr, &cache->xlat, &l, true); | |
3281 | ||
4bfb024b PB |
3282 | /* |
3283 | * cache->xlat is now relative to cache->mrs.mr, not to the section itself. | |
3284 | * Take that into account to compute how many bytes are there between | |
3285 | * cache->xlat and the end of the section. | |
3286 | */ | |
3287 | diff = int128_sub(cache->mrs.size, | |
3288 | int128_make64(cache->xlat - cache->mrs.offset_within_region)); | |
3289 | l = int128_get64(int128_min(diff, int128_make64(l))); | |
3290 | ||
48564041 PB |
3291 | mr = cache->mrs.mr; |
3292 | memory_region_ref(mr); | |
3293 | if (memory_access_is_direct(mr, is_write)) { | |
53d0790d PM |
3294 | /* We don't care about the memory attributes here as we're only |
3295 | * doing this if we found actual RAM, which behaves the same | |
3296 | * regardless of attributes; so UNSPECIFIED is fine. | |
3297 | */ | |
48564041 | 3298 | l = flatview_extend_translation(cache->fv, addr, len, mr, |
53d0790d PM |
3299 | cache->xlat, l, is_write, |
3300 | MEMTXATTRS_UNSPECIFIED); | |
48564041 PB |
3301 | cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true); |
3302 | } else { | |
3303 | cache->ptr = NULL; | |
3304 | } | |
3305 | ||
3306 | cache->len = l; | |
3307 | cache->is_write = is_write; | |
3308 | return l; | |
1f4e496e PB |
3309 | } |
3310 | ||
3311 | void address_space_cache_invalidate(MemoryRegionCache *cache, | |
3312 | hwaddr addr, | |
3313 | hwaddr access_len) | |
3314 | { | |
48564041 PB |
3315 | assert(cache->is_write); |
3316 | if (likely(cache->ptr)) { | |
3317 | invalidate_and_set_dirty(cache->mrs.mr, addr + cache->xlat, access_len); | |
3318 | } | |
1f4e496e PB |
3319 | } |
3320 | ||
3321 | void address_space_cache_destroy(MemoryRegionCache *cache) | |
3322 | { | |
48564041 PB |
3323 | if (!cache->mrs.mr) { |
3324 | return; | |
3325 | } | |
3326 | ||
3327 | if (xen_enabled()) { | |
3328 | xen_invalidate_map_cache_entry(cache->ptr); | |
3329 | } | |
3330 | memory_region_unref(cache->mrs.mr); | |
3331 | flatview_unref(cache->fv); | |
3332 | cache->mrs.mr = NULL; | |
3333 | cache->fv = NULL; | |
3334 | } | |
3335 | ||
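/*
 * Editorial sketch (not part of this file): intended lifecycle of the
 * cache API above, for hot paths that touch one guest-RAM range
 * repeatedly (virtio vrings are the classic user).  "ring_addr",
 * "ring_len" and "used_idx_off" are illustrative.
 */
#if 0
    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    uint16_t idx = 1;                  /* value to publish, illustrative */
    int64_t cached;

    cached = address_space_cache_init(&cache, as, ring_addr, ring_len, true);
    /* may cover less than ring_len; callers must check "cached" */
    address_space_write_cached(&cache, used_idx_off, &idx, sizeof(idx));
    address_space_cache_invalidate(&cache, used_idx_off, sizeof(idx));
    address_space_cache_destroy(&cache);
#endif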
3336 | /* Called from RCU critical section. This function has the same | |
3337 | * semantics as address_space_translate, but it only works on a | |
3338 | * predefined range of a MemoryRegion that was mapped with | |
3339 | * address_space_cache_init. | |
3340 | */ | |
3341 | static inline MemoryRegion *address_space_translate_cached( | |
3342 | MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat, | |
bc6b1cec | 3343 | hwaddr *plen, bool is_write, MemTxAttrs attrs) |
48564041 PB |
3344 | { |
3345 | MemoryRegionSection section; | |
3346 | MemoryRegion *mr; | |
3347 | IOMMUMemoryRegion *iommu_mr; | |
3348 | AddressSpace *target_as; | |
3349 | ||
3350 | assert(!cache->ptr); | |
3351 | *xlat = addr + cache->xlat; | |
3352 | ||
3353 | mr = cache->mrs.mr; | |
3354 | iommu_mr = memory_region_get_iommu(mr); | |
3355 | if (!iommu_mr) { | |
3356 | /* MMIO region. */ | |
3357 | return mr; | |
3358 | } | |
3359 | ||
3360 | section = address_space_translate_iommu(iommu_mr, xlat, plen, | |
3361 | NULL, is_write, true, | |
2f7b009c | 3362 | &target_as, attrs); |
48564041 PB |
3363 | return section.mr; |
3364 | } | |
3365 | ||
3366 | /* Called from RCU critical section. address_space_read_cached uses this | |
3367 | * out-of-line function when the target is an MMIO or IOMMU region.
3368 | */ | |
38df19fa | 3369 | MemTxResult |
48564041 | 3370 | address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr, |
0c249ff7 | 3371 | void *buf, hwaddr len) |
48564041 PB |
3372 | { |
3373 | hwaddr addr1, l; | |
3374 | MemoryRegion *mr; | |
3375 | ||
3376 | l = len; | |
bc6b1cec PM |
3377 | mr = address_space_translate_cached(cache, addr, &addr1, &l, false, |
3378 | MEMTXATTRS_UNSPECIFIED); | |
38df19fa PMD |
3379 | return flatview_read_continue(cache->fv, |
3380 | addr, MEMTXATTRS_UNSPECIFIED, buf, len, | |
3381 | addr1, l, mr); | |
48564041 PB |
3382 | } |
3383 | ||
3384 | /* Called from RCU critical section. address_space_write_cached uses this | |
3385 | * out-of-line function when the target is an MMIO or IOMMU region.
3386 | */ | |
38df19fa | 3387 | MemTxResult |
48564041 | 3388 | address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr, |
0c249ff7 | 3389 | const void *buf, hwaddr len) |
48564041 PB |
3390 | { |
3391 | hwaddr addr1, l; | |
3392 | MemoryRegion *mr; | |
3393 | ||
3394 | l = len; | |
bc6b1cec PM |
3395 | mr = address_space_translate_cached(cache, addr, &addr1, &l, true, |
3396 | MEMTXATTRS_UNSPECIFIED); | |
38df19fa PMD |
3397 | return flatview_write_continue(cache->fv, |
3398 | addr, MEMTXATTRS_UNSPECIFIED, buf, len, | |
3399 | addr1, l, mr); | |
1f4e496e PB |
3400 | } |
3401 | ||
3402 | #define ARG1_DECL MemoryRegionCache *cache | |
3403 | #define ARG1 cache | |
48564041 PB |
3404 | #define SUFFIX _cached_slow |
3405 | #define TRANSLATE(...) address_space_translate_cached(cache, __VA_ARGS__) | |
48564041 PB |
3406 | #define RCU_READ_LOCK() ((void)0) |
3407 | #define RCU_READ_UNLOCK() ((void)0) | |
139c1837 | 3408 | #include "memory_ldst.c.inc" |
1f4e496e | 3409 | |
5e2972fd | 3410 | /* virtual memory access for debug (includes writing to ROM) */ |
f17ec444 | 3411 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
28c80bfe | 3412 | void *ptr, target_ulong len, bool is_write) |
13eb76e0 | 3413 | { |
a8170e5e | 3414 | hwaddr phys_addr; |
0c249ff7 | 3415 | target_ulong l, page; |
d7ef71ef | 3416 | uint8_t *buf = ptr; |
13eb76e0 | 3417 | |
79ca7a1b | 3418 | cpu_synchronize_state(cpu); |
13eb76e0 | 3419 | while (len > 0) { |
5232e4c7 PM |
3420 | int asidx; |
3421 | MemTxAttrs attrs; | |
ddfc8b96 | 3422 | MemTxResult res; |
5232e4c7 | 3423 | |
13eb76e0 | 3424 | page = addr & TARGET_PAGE_MASK; |
5232e4c7 PM |
3425 | phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); |
3426 | asidx = cpu_asidx_from_attrs(cpu, attrs); | |
13eb76e0 FB |
3427 | /* if no physical page mapped, return an error */ |
3428 | if (phys_addr == -1) | |
3429 | return -1; | |
3430 | l = (page + TARGET_PAGE_SIZE) - addr; | |
3431 | if (l > len) | |
3432 | l = len; | |
5e2972fd | 3433 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
2e38847b | 3434 | if (is_write) { |
ddfc8b96 PMD |
3435 | res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr, |
3436 | attrs, buf, l); | |
2e38847b | 3437 | } else { |
ddfc8b96 PMD |
3438 | res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr, |
3439 | attrs, buf, l); | |
3440 | } | |
3441 | if (res != MEMTX_OK) { | |
3442 | return -1; | |
2e38847b | 3443 | } |
13eb76e0 FB |
3444 | len -= l; |
3445 | buf += l; | |
3446 | addr += l; | |
3447 | } | |
3448 | return 0; | |
3449 | } | |
038629a6 DDAG |
3450 | |
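/*
 * Editorial sketch (not part of this file): cpu_memory_rw_debug() above
 * is what the gdbstub and monitor use to access guest *virtual* memory;
 * unlike normal accesses it walks the debug page tables and may write
 * through to ROM.  "pc_vaddr" is an illustrative guest virtual address.
 */
#if 0
    uint32_t insn;

    if (cpu_memory_rw_debug(cpu, pc_vaddr, &insn, sizeof(insn), false) < 0) {
        /* pc_vaddr is not mapped in the guest page tables */
    }
#endif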
3451 | /* | |
3452 | * Allows code that needs to deal with migration bitmaps etc. to still be built
3453 | * target-independent.
3454 | */ | |
20afaed9 | 3455 | size_t qemu_target_page_size(void) |
038629a6 | 3456 | { |
20afaed9 | 3457 | return TARGET_PAGE_SIZE; |
038629a6 DDAG |
3458 | } |
3459 | ||
46d702b1 JQ |
3460 | int qemu_target_page_bits(void) |
3461 | { | |
3462 | return TARGET_PAGE_BITS; | |
3463 | } | |
3464 | ||
3465 | int qemu_target_page_bits_min(void) | |
3466 | { | |
3467 | return TARGET_PAGE_BITS_MIN; | |
3468 | } | |
8e4a424b | 3469 | |
a8170e5e | 3470 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
76f35538 | 3471 | { |
5c8a00ce | 3472 | MemoryRegion *mr;
149f54b5 | 3473 | hwaddr l = 1; |
41063e1e | 3474 | bool res; |
76f35538 | 3475 | |
694ea274 | 3476 | RCU_READ_LOCK_GUARD(); |
5c8a00ce | 3477 | mr = address_space_translate(&address_space_memory, |
bc6b1cec PM |
3478 | phys_addr, &phys_addr, &l, false, |
3479 | MEMTXATTRS_UNSPECIFIED); | |
76f35538 | 3480 | |
41063e1e | 3481 | res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); |
41063e1e | 3482 | return res; |
76f35538 | 3483 | } |
bd2fa51f | 3484 | |
e3807054 | 3485 | int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) |
bd2fa51f MH |
3486 | { |
3487 | RAMBlock *block; | |
e3807054 | 3488 | int ret = 0; |
bd2fa51f | 3489 | |
694ea274 | 3490 | RCU_READ_LOCK_GUARD(); |
99e15582 | 3491 | RAMBLOCK_FOREACH(block) { |
754cb9c0 | 3492 | ret = func(block, opaque); |
e3807054 DDAG |
3493 | if (ret) { |
3494 | break; | |
3495 | } | |
bd2fa51f | 3496 | } |
e3807054 | 3497 | return ret; |
bd2fa51f | 3498 | } |
d3a5038c DDAG |
3499 | |
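/*
 * Editorial sketch (not part of this file): qemu_ram_foreach_block()
 * above visits every RAMBlock under RCU; a non-zero return from the
 * callback stops the walk and is propagated to the caller.
 */
#if 0
static int count_one_block(RAMBlock *rb, void *opaque)
{
    (*(size_t *)opaque)++;             /* opaque points at a counter */
    return 0;                          /* 0 = keep iterating */
}

/* ... size_t n = 0; qemu_ram_foreach_block(count_one_block, &n); ... */
#endif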
3500 | /* | |
3501 | * Unmap pages of memory from start to start+length such that | |
3502 | * they a) read as 0, b) trigger whatever fault mechanism
3503 | * the OS provides for postcopy. | |
3504 | * The pages must be unmapped by the end of the function. | |
3505 | * Returns: 0 on success, non-zero on failure
3506 | * | |
3507 | */ | |
3508 | int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) | |
3509 | { | |
3510 | int ret = -1; | |
3511 | ||
3512 | uint8_t *host_startaddr = rb->host + start; | |
3513 | ||
619bd31d | 3514 | if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { |
d3a5038c DDAG |
3515 | error_report("ram_block_discard_range: Unaligned start address: %p", |
3516 | host_startaddr); | |
3517 | goto err; | |
3518 | } | |
3519 | ||
3520 | if ((start + length) <= rb->used_length) { | |
db144f70 | 3521 | bool need_madvise, need_fallocate; |
619bd31d | 3522 | if (!QEMU_IS_ALIGNED(length, rb->page_size)) { |
72821d93 WY |
3523 | error_report("ram_block_discard_range: Unaligned length: %zx", |
3524 | length); | |
d3a5038c DDAG |
3525 | goto err; |
3526 | } | |
3527 | ||
3528 | errno = ENOTSUP; /* If we are missing MADVISE etc */ | |
3529 | ||
db144f70 DDAG |
3530 | /* The logic here is messy:
3531 | * madvise DONTNEED fails for hugepages;
3532 | * fallocate works on hugepages and shmem.
3533 | */ | |
3534 | need_madvise = (rb->page_size == qemu_host_page_size); | |
3535 | need_fallocate = rb->fd != -1; | |
3536 | if (need_fallocate) { | |
3537 | /* For a file, this causes the area of the file to be zeroed
3538 | * if read, and for hugetlbfs also causes it to be unmapped | |
3539 | * so a userfault will trigger. | |
e2fa71f5 DDAG |
3540 | */ |
3541 | #ifdef CONFIG_FALLOCATE_PUNCH_HOLE | |
3542 | ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, | |
3543 | start, length); | |
db144f70 DDAG |
3544 | if (ret) { |
3545 | ret = -errno; | |
3546 | error_report("ram_block_discard_range: Failed to fallocate " | |
3547 | "%s:%" PRIx64 " +%zx (%d)", | |
3548 | rb->idstr, start, length, ret); | |
3549 | goto err; | |
3550 | } | |
3551 | #else | |
3552 | ret = -ENOSYS; | |
3553 | error_report("ram_block_discard_range: fallocate not available/file "
3554 | "%s:%" PRIx64 " +%zx (%d)", | |
3555 | rb->idstr, start, length, ret); | |
3556 | goto err; | |
e2fa71f5 DDAG |
3557 | #endif |
3558 | } | |
db144f70 DDAG |
3559 | if (need_madvise) { |
3560 | /* For normal RAM this causes it to be unmapped, | |
3561 | * for shared memory it causes the local mapping to disappear | |
3562 | * and to fall back on the file contents (which we just | |
3563 | * fallocate'd away). | |
3564 | */ | |
3565 | #if defined(CONFIG_MADVISE) | |
3566 | ret = madvise(host_startaddr, length, MADV_DONTNEED); | |
3567 | if (ret) { | |
3568 | ret = -errno; | |
3569 | error_report("ram_block_discard_range: Failed to discard range " | |
3570 | "%s:%" PRIx64 " +%zx (%d)", | |
3571 | rb->idstr, start, length, ret); | |
3572 | goto err; | |
3573 | } | |
3574 | #else | |
3575 | ret = -ENOSYS; | |
3576 | error_report("ram_block_discard_range: MADVISE not available "
d3a5038c DDAG |
3577 | "%s:%" PRIx64 " +%zx (%d)", |
3578 | rb->idstr, start, length, ret); | |
db144f70 DDAG |
3579 | goto err; |
3580 | #endif | |
d3a5038c | 3581 | } |
db144f70 DDAG |
3582 | trace_ram_block_discard_range(rb->idstr, host_startaddr, length, |
3583 | need_madvise, need_fallocate, ret); | |
d3a5038c DDAG |
3584 | } else { |
3585 | error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 | |
3586 | "/%zx/" RAM_ADDR_FMT")", | |
3587 | rb->idstr, start, length, rb->used_length); | |
3588 | } | |
3589 | ||
3590 | err: | |
3591 | return ret; | |
3592 | } | |
3593 | ||
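/*
 * Editorial sketch (not part of this file): postcopy-style use of
 * ram_block_discard_range() above.  Start and length must be aligned to
 * the block's page size, so callers usually round with
 * QEMU_ALIGN_DOWN/UP; "rb" and "offset" are illustrative.
 */
#if 0
    uint64_t start = QEMU_ALIGN_DOWN(offset, rb->page_size);

    if (ram_block_discard_range(rb, start, rb->page_size)) {
        /* range is still mapped; a later access will not fault */
    }
#endif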
a4de8552 JH |
3594 | bool ramblock_is_pmem(RAMBlock *rb) |
3595 | { | |
3596 | return rb->flags & RAM_PMEM; | |
3597 | } | |
3598 | ||
b6b71cb5 | 3599 | static void mtree_print_phys_entries(int start, int end, int skip, int ptr) |
5e8fd947 AK |
3600 | { |
3601 | if (start == end - 1) { | |
b6b71cb5 | 3602 | qemu_printf("\t%3d ", start); |
5e8fd947 | 3603 | } else { |
b6b71cb5 | 3604 | qemu_printf("\t%3d..%-3d ", start, end - 1); |
5e8fd947 | 3605 | } |
b6b71cb5 | 3606 | qemu_printf(" skip=%d ", skip); |
5e8fd947 | 3607 | if (ptr == PHYS_MAP_NODE_NIL) { |
b6b71cb5 | 3608 | qemu_printf(" ptr=NIL"); |
5e8fd947 | 3609 | } else if (!skip) { |
b6b71cb5 | 3610 | qemu_printf(" ptr=#%d", ptr); |
5e8fd947 | 3611 | } else { |
b6b71cb5 | 3612 | qemu_printf(" ptr=[%d]", ptr); |
5e8fd947 | 3613 | } |
b6b71cb5 | 3614 | qemu_printf("\n"); |
5e8fd947 AK |
3615 | } |
3616 | ||
3617 | #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ | |
3618 | int128_sub((size), int128_one())) : 0) | |
3619 | ||
b6b71cb5 | 3620 | void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) |
5e8fd947 AK |
3621 | { |
3622 | int i; | |
3623 | ||
b6b71cb5 MA |
3624 | qemu_printf(" Dispatch\n"); |
3625 | qemu_printf(" Physical sections\n"); | |
5e8fd947 AK |
3626 | |
3627 | for (i = 0; i < d->map.sections_nb; ++i) { | |
3628 | MemoryRegionSection *s = d->map.sections + i; | |
3629 | const char *names[] = { " [unassigned]", " [not dirty]", | |
3630 | " [ROM]", " [watch]" }; | |
3631 | ||
b6b71cb5 MA |
3632 | qemu_printf(" #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx |
3633 | " %s%s%s%s%s", | |
5e8fd947 AK |
3634 | i, |
3635 | s->offset_within_address_space, | |
3636 | s->offset_within_address_space + MR_SIZE(s->mr->size), | |
3637 | s->mr->name ? s->mr->name : "(noname)", | |
3638 | i < ARRAY_SIZE(names) ? names[i] : "", | |
3639 | s->mr == root ? " [ROOT]" : "", | |
3640 | s == d->mru_section ? " [MRU]" : "", | |
3641 | s->mr->is_iommu ? " [iommu]" : ""); | |
3642 | ||
3643 | if (s->mr->alias) { | |
b6b71cb5 | 3644 | qemu_printf(" alias=%s", s->mr->alias->name ? |
5e8fd947 AK |
3645 | s->mr->alias->name : "noname"); |
3646 | } | |
b6b71cb5 | 3647 | qemu_printf("\n"); |
5e8fd947 AK |
3648 | } |
3649 | ||
b6b71cb5 | 3650 | qemu_printf(" Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n", |
5e8fd947 AK |
3651 | P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip); |
3652 | for (i = 0; i < d->map.nodes_nb; ++i) { | |
3653 | int j, jprev; | |
3654 | PhysPageEntry prev; | |
3655 | Node *n = d->map.nodes + i; | |
3656 | ||
b6b71cb5 | 3657 | qemu_printf(" [%d]\n", i); |
5e8fd947 AK |
3658 | |
3659 | for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) { | |
3660 | PhysPageEntry *pe = *n + j; | |
3661 | ||
3662 | if (pe->ptr == prev.ptr && pe->skip == prev.skip) { | |
3663 | continue; | |
3664 | } | |
3665 | ||
b6b71cb5 | 3666 | mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); |
5e8fd947 AK |
3667 | |
3668 | jprev = j; | |
3669 | prev = *pe; | |
3670 | } | |
3671 | ||
3672 | if (jprev != ARRAY_SIZE(*n)) { | |
b6b71cb5 | 3673 | mtree_print_phys_entries(jprev, j, prev.skip, prev.ptr); |
5e8fd947 AK |
3674 | } |
3675 | } | |
3676 | } | |
3677 | ||
d24f31db DH |
3678 | /* |
3679 | * If positive, discarding RAM is disabled. If negative, discarding RAM is | |
3680 | * required to work and cannot be disabled. | |
3681 | */ | |
3682 | static int ram_block_discard_disabled; | |
3683 | ||
3684 | int ram_block_discard_disable(bool state) | |
3685 | { | |
3686 | int old; | |
3687 | ||
3688 | if (!state) { | |
d73415a3 | 3689 | qatomic_dec(&ram_block_discard_disabled); |
d24f31db DH |
3690 | return 0; |
3691 | } | |
3692 | ||
3693 | do { | |
d73415a3 | 3694 | old = qatomic_read(&ram_block_discard_disabled); |
d24f31db DH |
3695 | if (old < 0) { |
3696 | return -EBUSY; | |
3697 | } | |
d73415a3 SH |
3698 | } while (qatomic_cmpxchg(&ram_block_discard_disabled, |
3699 | old, old + 1) != old); | |
d24f31db DH |
3700 | return 0; |
3701 | } | |
3702 | ||
3703 | int ram_block_discard_require(bool state) | |
3704 | { | |
3705 | int old; | |
3706 | ||
3707 | if (!state) { | |
d73415a3 | 3708 | qatomic_inc(&ram_block_discard_disabled); |
d24f31db DH |
3709 | return 0; |
3710 | } | |
3711 | ||
3712 | do { | |
d73415a3 | 3713 | old = qatomic_read(&ram_block_discard_disabled); |
d24f31db DH |
3714 | if (old > 0) { |
3715 | return -EBUSY; | |
3716 | } | |
d73415a3 SH |
3717 | } while (qatomic_cmpxchg(&ram_block_discard_disabled, |
3718 | old, old - 1) != old); | |
d24f31db DH |
3719 | return 0; |
3720 | } | |
3721 | ||
3722 | bool ram_block_discard_is_disabled(void) | |
3723 | { | |
d73415a3 | 3724 | return qatomic_read(&ram_block_discard_disabled) > 0; |
d24f31db DH |
3725 | } |
3726 | ||
3727 | bool ram_block_discard_is_required(void) | |
3728 | { | |
d73415a3 | 3729 | return qatomic_read(&ram_block_discard_disabled) < 0; |
d24f31db | 3730 | } |
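/*
 * Editorial sketch (not part of this file): the counter above makes RAM
 * discarding an exclusive capability.  A device that pins all guest pages
 * (VFIO-style) disables discards; one whose operation *is* discarding
 * (virtio-mem-style) requires them; mixing the two yields -EBUSY.
 * "errp" is an illustrative Error **.
 */
#if 0
    if (ram_block_discard_disable(true)) {
        error_setg(errp, "discarding RAM conflicts with this device");
        return;
    }
    /* ... device lifetime ... */
    ram_block_discard_disable(false);  /* balance on unrealize */
#endif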