/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

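/*
 * Example for addrrange_intersection(): the intersection of
 * [0x1000, 0x3000) and [0x2000, 0x6000) is [0x2000, 0x3000), i.e. start
 * 0x2000 and size 0x1000.  Callers are expected to check
 * addrrange_intersects() first; otherwise the computed size can go
 * negative.
 */
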
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

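/*
 * Listeners are kept sorted by priority.  As a rule of thumb, callbacks
 * that make something visible (e.g. region_add, log_start) are dispatched
 * Forward, while callbacks that tear something down (e.g. region_del,
 * log_stop) are dispatched Reverse, so that teardown unwinds in the
 * opposite order of setup.
 */
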
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

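/*
 * For example, flatview_simplify() collapses two FlatRanges covering
 * [0x0, 0x1000) at offset 0 and [0x1000, 0x2000) at offset 0x1000 of the
 * same region, with identical dirty-log/romd/readonly/nonvolatile
 * attributes, into a single range [0x0, 0x2000).
 */
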
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

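/*
 * adjust_endianness() swaps bytes iff the byte order requested by the
 * access (the MO_BSWAP bit of @op) differs from the device's declared
 * endianness: e.g. the result of a little-endian 32-bit read from a
 * DEVICE_BIG_ENDIAN region is byte-swapped.
 */
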
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

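/*
 * Worked example for access_with_adjusted_size(): a 4-byte write of
 * 0xAABBCCDD to a region whose ops declare .impl.max_access_size = 2 is
 * issued as two 2-byte calls with mask 0xffff.  On a little-endian device
 * the access at addr+0 uses shift 0 and carries 0xCCDD, and the access at
 * addr+2 uses shift 16 and carries 0xAABB; on a big-endian device the two
 * halves go to the opposite offsets.
 */
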
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

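/*
 * For example, if a terminating container holds a higher-priority
 * subregion at [0x1000, 0x2000), the subregion is rendered first and the
 * container's own range is then split around it, filling only the gaps
 * that remain in the view.
 */
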
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

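/*
 * Generated views are entered into the flat_views hash table keyed by the
 * root region passed in (normally the result of
 * memory_region_get_flatview_root()), so address spaces whose roots
 * resolve to the same region share a single FlatView and dispatch tree.
 */
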
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

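/*
 * Example (illustrative): batching several updates so that the flat views
 * are rebuilt and listeners notified only once, at commit time:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_enabled(bank1, true);
 *     memory_region_transaction_commit();
 */
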
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

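/* Example: the name "pci/mem[0]" escapes to "pci\x2fmem\x5b0\x5d". */
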
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

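/*
 * Typical use from a device model (the mydev_* names are illustrative):
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl = { .min_access_size = 4, .max_access_size = 4 },
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * access_with_adjusted_size() then splits or widens guest accesses to
 * match the declared .impl sizes before calling mydev_read/mydev_write.
 */
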
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

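/*
 * Example (illustrative): mirroring the first 640K of system RAM at a
 * second guest-physical address:
 *
 *     memory_region_init_alias(&mirror, owner, "ram-low-mirror",
 *                              system_ram, 0, 0xa0000);
 *     memory_region_add_subregion(system_memory, 0x100000000ULL, &mirror);
 *
 * The alias has no storage of its own; accesses are redirected into
 * @orig starting at @offset.
 */
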
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

549d4005
EA
1819static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
1820 Error **errp)
5bf3d319
PX
1821{
1822 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1823 IOMMUNotifier *iommu_notifier;
1221a474 1824 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
549d4005 1825 int ret = 0;
5bf3d319 1826
3df9d748 1827 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
5bf3d319
PX
1828 flags |= iommu_notifier->notifier_flags;
1829 }
1830
1221a474 1831 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
549d4005
EA
1832 ret = imrc->notify_flag_changed(iommu_mr,
1833 iommu_mr->iommu_notify_flags,
1834 flags, errp);
5bf3d319
PX
1835 }
1836
549d4005
EA
1837 if (!ret) {
1838 iommu_mr->iommu_notify_flags = flags;
1839 }
1840 return ret;
5bf3d319
PX
1841}
1842
549d4005
EA
1843int memory_region_register_iommu_notifier(MemoryRegion *mr,
1844 IOMMUNotifier *n, Error **errp)
06866575 1845{
3df9d748 1846 IOMMUMemoryRegion *iommu_mr;
549d4005 1847 int ret;
3df9d748 1848
efcd38c5 1849 if (mr->alias) {
549d4005 1850 return memory_region_register_iommu_notifier(mr->alias, n, errp);
efcd38c5
JW
1851 }
1852
cdb30812 1853 /* We need to register for at least one notifier flag */
3df9d748 1854 iommu_mr = IOMMU_MEMORY_REGION(mr);
cdb30812 1855 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
698feb5e 1856 assert(n->start <= n->end);
cb1efcf4
PM
1857 assert(n->iommu_idx >= 0 &&
1858 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1859
3df9d748 1860 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
549d4005
EA
1861 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
1862 if (ret) {
1863 QLIST_REMOVE(n, node);
1864 }
1865 return ret;
06866575
DG
1866}
1867
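/*
 * Illustrative sketch (not part of memory.c): a VFIO-style consumer
 * fills in an IOMMUNotifier with iommu_notifier_init() before
 * registering it; my_map_notify and the covered range are
 * hypothetical.
 */
static void my_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* react to the mapping change described by iotlb */
}

static void my_register_notifier(MemoryRegion *mr, Error **errp)
{
    static IOMMUNotifier n;

    iommu_notifier_init(&n, my_map_notify, IOMMU_NOTIFIER_ALL,
                        0, HWADDR_MAX, 0 /* iommu_idx */);
    memory_region_register_iommu_notifier(mr, &n, errp);
}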
3df9d748 1868uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
a788f227 1869{
1221a474
AK
1870 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1871
1872 if (imrc->get_min_page_size) {
1873 return imrc->get_min_page_size(iommu_mr);
f682e9c2
AK
1874 }
1875 return TARGET_PAGE_SIZE;
1876}
1877
3df9d748 1878void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
f682e9c2 1879{
3df9d748 1880 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1221a474 1881 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
f682e9c2 1882 hwaddr addr, granularity;
a788f227
DG
1883 IOMMUTLBEntry iotlb;
1884
faa362e3 1885 /* If the IOMMU has its own replay callback, override */
1221a474
AK
1886 if (imrc->replay) {
1887 imrc->replay(iommu_mr, n);
faa362e3
PX
1888 return;
1889 }
1890
3df9d748 1891 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
f682e9c2 1892
a788f227 1893 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
2c91bcf2 1894 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
a788f227
DG
1895 if (iotlb.perm != IOMMU_NONE) {
1896 n->notify(n, &iotlb);
1897 }
1898
1899 /* If (2^64 - MR size) < granularity, addr += granularity can wrap
1900 * around and loop forever; this catches such a wraparound. */
1901 if ((addr + granularity) < addr) {
1902 break;
1903 }
1904 }
1905}
1906
cdb30812
PX
1907void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1908 IOMMUNotifier *n)
06866575 1909{
3df9d748
AK
1910 IOMMUMemoryRegion *iommu_mr;
1911
efcd38c5
JW
1912 if (mr->alias) {
1913 memory_region_unregister_iommu_notifier(mr->alias, n);
1914 return;
1915 }
cdb30812 1916 QLIST_REMOVE(n, node);
3df9d748 1917 iommu_mr = IOMMU_MEMORY_REGION(mr);
549d4005 1918 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
06866575
DG
1919}
1920
bd2bfa4c
PX
1921void memory_region_notify_one(IOMMUNotifier *notifier,
1922 IOMMUTLBEntry *entry)
06866575 1923{
cdb30812 1924 IOMMUNotifierFlag request_flags;
03c7140c 1925 hwaddr entry_end = entry->iova + entry->addr_mask;
cdb30812 1926
bd2bfa4c
PX
1927 /*
1928 * Skip the notification if the notified range does not overlap
1929 * with the registered range.
1930 */
03c7140c 1931 if (notifier->start > entry_end || notifier->end < entry->iova) {
bd2bfa4c
PX
1932 return;
1933 }
cdb30812 1934
03c7140c
YZ
1935 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
1936
bd2bfa4c 1937 if (entry->perm & IOMMU_RW) {
cdb30812
PX
1938 request_flags = IOMMU_NOTIFIER_MAP;
1939 } else {
1940 request_flags = IOMMU_NOTIFIER_UNMAP;
1941 }
1942
bd2bfa4c
PX
1943 if (notifier->notifier_flags & request_flags) {
1944 notifier->notify(notifier, entry);
1945 }
1946}
1947
3df9d748 1948void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
cb1efcf4 1949 int iommu_idx,
bd2bfa4c
PX
1950 IOMMUTLBEntry entry)
1951{
1952 IOMMUNotifier *iommu_notifier;
1953
3df9d748 1954 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
bd2bfa4c 1955
3df9d748 1956 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
cb1efcf4
PM
1957 if (iommu_notifier->iommu_idx == iommu_idx) {
1958 memory_region_notify_one(iommu_notifier, &entry);
1959 }
cdb30812 1960 }
06866575
DG
1961}
1962
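/*
 * Illustrative sketch (not part of memory.c): an IOMMU model that has
 * just invalidated a 4K translation builds an IOMMUTLBEntry and fires
 * the notifiers of the affected index; the iova and index values are
 * made up.
 */
static void my_iommu_invalidate(IOMMUMemoryRegion *iommu_mr, hwaddr iova)
{
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = iova & ~(hwaddr)0xfff,
        .translated_addr = 0,
        .addr_mask = 0xfff,          /* one 4K page */
        .perm = IOMMU_NONE,          /* i.e. an unmap event */
    };

    memory_region_notify_iommu(iommu_mr, 0 /* iommu_idx */, entry);
}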
f1334de6
AK
1963int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1964 enum IOMMUMemoryRegionAttr attr,
1965 void *data)
1966{
1967 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1968
1969 if (!imrc->get_attr) {
1970 return -EINVAL;
1971 }
1972
1973 return imrc->get_attr(iommu_mr, attr, data);
1974}
1975
21f40209
PM
1976int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1977 MemTxAttrs attrs)
1978{
1979 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1980
1981 if (!imrc->attrs_to_index) {
1982 return 0;
1983 }
1984
1985 return imrc->attrs_to_index(iommu_mr, attrs);
1986}
1987
1988int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
1989{
1990 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1991
1992 if (!imrc->num_indexes) {
1993 return 1;
1994 }
1995
1996 return imrc->num_indexes(iommu_mr);
1997}
1998
093bc2cd
AK
1999void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2000{
5a583347 2001 uint8_t mask = 1 << client;
deb809ed 2002 uint8_t old_logging;
5a583347 2003
dbddac6d 2004 assert(client == DIRTY_MEMORY_VGA);
deb809ed
PB
2005 old_logging = mr->vga_logging_count;
2006 mr->vga_logging_count += log ? 1 : -1;
2007 if (!!old_logging == !!mr->vga_logging_count) {
2008 return;
2009 }
2010
59023ef4 2011 memory_region_transaction_begin();
5a583347 2012 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
22bde714 2013 memory_region_update_pending |= mr->enabled;
59023ef4 2014 memory_region_transaction_commit();
093bc2cd
AK
2015}
2016
a8170e5e
AK
2017void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2018 hwaddr size)
093bc2cd 2019{
8e41fb63
FZ
2020 assert(mr->ram_block);
2021 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2022 size,
58d2707e 2023 memory_region_get_dirty_log_mask(mr));
093bc2cd
AK
2024}
2025
0fe1eca7 2026static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
093bc2cd 2027{
0a752eee 2028 MemoryListener *listener;
0d673e36 2029 AddressSpace *as;
0a752eee 2030 FlatView *view;
5a583347
AK
2031 FlatRange *fr;
2032
0a752eee
PB
2033 /* If the same address space has multiple log_sync listeners, we
2034 * visit that address space's FlatView multiple times. But because
2035 * log_sync listeners are rare, this is still cheaper than walking
2036 * every address space once, listeners or not.
2037 */
2038 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2039 if (!listener->log_sync) {
2040 continue;
2041 }
2042 as = listener->address_space;
2043 view = address_space_get_flatview(as);
99e86347 2044 FOR_EACH_FLAT_RANGE(fr, view) {
3ebb1817 2045 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
16620684 2046 MemoryRegionSection mrs = section_from_flat_range(fr, view);
0a752eee 2047 listener->log_sync(listener, &mrs);
0d673e36 2048 }
5a583347 2049 }
856d7245 2050 flatview_unref(view);
5a583347 2051 }
093bc2cd
AK
2052}
2053
077874e0
PX
2054void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2055 hwaddr len)
2056{
2057 MemoryRegionSection mrs;
2058 MemoryListener *listener;
2059 AddressSpace *as;
2060 FlatView *view;
2061 FlatRange *fr;
2062 hwaddr sec_start, sec_end, sec_size;
2063
2064 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2065 if (!listener->log_clear) {
2066 continue;
2067 }
2068 as = listener->address_space;
2069 view = address_space_get_flatview(as);
2070 FOR_EACH_FLAT_RANGE(fr, view) {
2071 if (!fr->dirty_log_mask || fr->mr != mr) {
2072 /*
2073 * The clear-dirty-bitmap operation only applies to
2074 * regions that have dirty logging enabled.
2075 */
2076 continue;
2077 }
2078
2079 mrs = section_from_flat_range(fr, view);
2080
2081 sec_start = MAX(mrs.offset_within_region, start);
2082 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2083 sec_end = MIN(sec_end, start + len);
2084
2085 if (sec_start >= sec_end) {
2086 /*
2087 * If this memory region section has no intersection
2088 * with the requested range, skip.
2089 */
2090 continue;
2091 }
2092
2093 /* Valid case; shrink the section if needed */
2094 mrs.offset_within_address_space +=
2095 sec_start - mrs.offset_within_region;
2096 mrs.offset_within_region = sec_start;
2097 sec_size = sec_end - sec_start;
2098 mrs.size = int128_make64(sec_size);
2099 listener->log_clear(listener, &mrs);
2100 }
2101 flatview_unref(view);
2102 }
2103}
2104
0fe1eca7
PB
2105DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2106 hwaddr addr,
2107 hwaddr size,
2108 unsigned client)
2109{
9458a9a1 2110 DirtyBitmapSnapshot *snapshot;
0fe1eca7
PB
2111 assert(mr->ram_block);
2112 memory_region_sync_dirty_bitmap(mr);
9458a9a1
PB
2113 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2114 memory_global_after_dirty_log_sync();
2115 return snapshot;
0fe1eca7
PB
2116}
2117
2118bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2119 hwaddr addr, hwaddr size)
2120{
2121 assert(mr->ram_block);
2122 return cpu_physical_memory_snapshot_get_dirty(snap,
2123 memory_region_get_ram_addr(mr) + addr, size);
2124}
2125
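/*
 * Illustrative sketch (not part of memory.c): display code typically
 * snapshots-and-clears the VGA dirty bits once per refresh and then
 * queries the snapshot per scanline; the function name and "stride"
 * are made up.
 */
static void my_display_refresh(MemoryRegion *vram, hwaddr stride, int lines)
{
    DirtyBitmapSnapshot *snap;
    int y;

    snap = memory_region_snapshot_and_clear_dirty(vram, 0,
                                                  stride * lines,
                                                  DIRTY_MEMORY_VGA);
    for (y = 0; y < lines; y++) {
        if (memory_region_snapshot_get_dirty(vram, snap, y * stride,
                                             stride)) {
            /* redraw scanline y */
        }
    }
    g_free(snap);
}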
093bc2cd
AK
2126void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2127{
fb1cd6f9 2128 if (mr->readonly != readonly) {
59023ef4 2129 memory_region_transaction_begin();
fb1cd6f9 2130 mr->readonly = readonly;
22bde714 2131 memory_region_update_pending |= mr->enabled;
59023ef4 2132 memory_region_transaction_commit();
fb1cd6f9 2133 }
093bc2cd
AK
2134}
2135
c26763f8
MAL
2136void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2137{
2138 if (mr->nonvolatile != nonvolatile) {
2139 memory_region_transaction_begin();
2140 mr->nonvolatile = nonvolatile;
2141 memory_region_update_pending |= mr->enabled;
2142 memory_region_transaction_commit();
2143 }
2144}
2145
5f9a5ea1 2146void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
d0a9b5bc 2147{
5f9a5ea1 2148 if (mr->romd_mode != romd_mode) {
59023ef4 2149 memory_region_transaction_begin();
5f9a5ea1 2150 mr->romd_mode = romd_mode;
22bde714 2151 memory_region_update_pending |= mr->enabled;
59023ef4 2152 memory_region_transaction_commit();
d0a9b5bc
AK
2153 }
2154}
2155
a8170e5e
AK
2156void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2157 hwaddr size, unsigned client)
093bc2cd 2158{
8e41fb63
FZ
2159 assert(mr->ram_block);
2160 cpu_physical_memory_test_and_clear_dirty(
2161 memory_region_get_ram_addr(mr) + addr, size, client);
093bc2cd
AK
2162}
2163
a35ba7be
PB
2164int memory_region_get_fd(MemoryRegion *mr)
2165{
4ff87573
PB
2166 int fd;
2167
694ea274 2168 RCU_READ_LOCK_GUARD();
4ff87573
PB
2169 while (mr->alias) {
2170 mr = mr->alias;
a35ba7be 2171 }
4ff87573 2172 fd = mr->ram_block->fd;
a35ba7be 2173
4ff87573
PB
2174 return fd;
2175}
a35ba7be 2176
093bc2cd
AK
2177void *memory_region_get_ram_ptr(MemoryRegion *mr)
2178{
49b24afc
PB
2179 void *ptr;
2180 uint64_t offset = 0;
093bc2cd 2181
694ea274 2182 RCU_READ_LOCK_GUARD();
49b24afc
PB
2183 while (mr->alias) {
2184 offset += mr->alias_offset;
2185 mr = mr->alias;
2186 }
8e41fb63 2187 assert(mr->ram_block);
0878d0e1 2188 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
093bc2cd 2189
0878d0e1 2190 return ptr;
093bc2cd
AK
2191}
2192
07bdaa41
PB
2193MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2194{
2195 RAMBlock *block;
2196
2197 block = qemu_ram_block_from_host(ptr, false, offset);
2198 if (!block) {
2199 return NULL;
2200 }
2201
2202 return block->mr;
2203}
2204
7ebb2745
FZ
2205ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2206{
2207 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2208}
2209
37d7c084
PB
2210void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2211{
8e41fb63 2212 assert(mr->ram_block);
37d7c084 2213
fa53a0e5 2214 qemu_ram_resize(mr->ram_block, newsize, errp);
37d7c084
PB
2215}
2216
b960fc17
PX
2217/*
2218 * Notify the proper memory listeners about the newly added
2219 * or removed CoalescedMemoryRange.
2220 */
2221static void memory_region_update_coalesced_range(MemoryRegion *mr,
2222 CoalescedMemoryRange *cmr,
2223 bool add)
093bc2cd 2224{
b960fc17 2225 AddressSpace *as;
99e86347 2226 FlatView *view;
093bc2cd 2227 FlatRange *fr;
093bc2cd 2228
0d673e36 2229 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
b960fc17
PX
2230 view = address_space_get_flatview(as);
2231 FOR_EACH_FLAT_RANGE(fr, view) {
2232 if (fr->mr == mr) {
2233 flat_range_coalesced_io_notify(fr, as, cmr, add);
2234 }
2235 }
2236 flatview_unref(view);
0d673e36
AK
2237 }
2238}
2239
093bc2cd
AK
2240void memory_region_set_coalescing(MemoryRegion *mr)
2241{
2242 memory_region_clear_coalescing(mr);
08dafab4 2243 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
093bc2cd
AK
2244}
2245
2246void memory_region_add_coalescing(MemoryRegion *mr,
a8170e5e 2247 hwaddr offset,
093bc2cd
AK
2248 uint64_t size)
2249{
7267c094 2250 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
093bc2cd 2251
08dafab4 2252 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
093bc2cd 2253 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
b960fc17 2254 memory_region_update_coalesced_range(mr, cmr, true);
d410515e 2255 memory_region_set_flush_coalesced(mr);
093bc2cd
AK
2256}
2257
2258void memory_region_clear_coalescing(MemoryRegion *mr)
2259{
2260 CoalescedMemoryRange *cmr;
9c1aa1c2
PX
2261
2262 if (QTAILQ_EMPTY(&mr->coalesced)) {
2263 return;
2264 }
093bc2cd 2265
d410515e
JK
2266 qemu_flush_coalesced_mmio_buffer();
2267 mr->flush_coalesced_mmio = false;
2268
093bc2cd
AK
2269 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2270 cmr = QTAILQ_FIRST(&mr->coalesced);
2271 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
b960fc17 2272 memory_region_update_coalesced_range(mr, cmr, false);
7267c094 2273 g_free(cmr);
ab5b3db5 2274 }
093bc2cd
AK
2275}
2276
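/*
 * Illustrative sketch (not part of memory.c): a device whose guests
 * issue bursts of side-effect-free MMIO writes (e1000-style) can
 * coalesce the region so KVM batches the exits; MyDevState and the
 * other names are hypothetical.
 */
static void my_dev_init_mmio(MyDevState *s)
{
    memory_region_init_io(&s->mmio, OBJECT(s), &my_dev_ops, s,
                          "my-dev-mmio", 0x1000);
    /* buffer writes to the whole 4K BAR until the next flush point */
    memory_region_add_coalescing(&s->mmio, 0, 0x1000);
}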
d410515e
JK
2277void memory_region_set_flush_coalesced(MemoryRegion *mr)
2278{
2279 mr->flush_coalesced_mmio = true;
2280}
2281
2282void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2283{
2284 qemu_flush_coalesced_mmio_buffer();
2285 if (QTAILQ_EMPTY(&mr->coalesced)) {
2286 mr->flush_coalesced_mmio = false;
2287 }
2288}
2289
196ea131
JK
2290void memory_region_clear_global_locking(MemoryRegion *mr)
2291{
2292 mr->global_locking = false;
2293}
2294
8c56c1a5
PF
2295static bool userspace_eventfd_warning;
2296
3e9d69e7 2297void memory_region_add_eventfd(MemoryRegion *mr,
a8170e5e 2298 hwaddr addr,
3e9d69e7
AK
2299 unsigned size,
2300 bool match_data,
2301 uint64_t data,
753d5e14 2302 EventNotifier *e)
3e9d69e7
AK
2303{
2304 MemoryRegionIoeventfd mrfd = {
08dafab4
AK
2305 .addr.start = int128_make64(addr),
2306 .addr.size = int128_make64(size),
3e9d69e7
AK
2307 .match_data = match_data,
2308 .data = data,
753d5e14 2309 .e = e,
3e9d69e7
AK
2310 };
2311 unsigned i;
2312
8c56c1a5
PF
2313 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2314 userspace_eventfd_warning))) {
2315 userspace_eventfd_warning = true;
2316 error_report("Using eventfd without MMIO binding in KVM. "
2317 "Suboptimal performance expected");
2318 }
2319
b8aecea2 2320 if (size) {
9bf825bf 2321 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
b8aecea2 2322 }
59023ef4 2323 memory_region_transaction_begin();
3e9d69e7 2324 for (i = 0; i < mr->ioeventfd_nb; ++i) {
73bb753d 2325 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
3e9d69e7
AK
2326 break;
2327 }
2328 }
2329 ++mr->ioeventfd_nb;
7267c094 2330 mr->ioeventfds = g_realloc(mr->ioeventfds,
3e9d69e7
AK
2331 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2332 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2333 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2334 mr->ioeventfds[i] = mrfd;
4dc56152 2335 ioeventfd_update_pending |= mr->enabled;
59023ef4 2336 memory_region_transaction_commit();
3e9d69e7
AK
2337}
2338
2339void memory_region_del_eventfd(MemoryRegion *mr,
a8170e5e 2340 hwaddr addr,
3e9d69e7
AK
2341 unsigned size,
2342 bool match_data,
2343 uint64_t data,
753d5e14 2344 EventNotifier *e)
3e9d69e7
AK
2345{
2346 MemoryRegionIoeventfd mrfd = {
08dafab4
AK
2347 .addr.start = int128_make64(addr),
2348 .addr.size = int128_make64(size),
3e9d69e7
AK
2349 .match_data = match_data,
2350 .data = data,
753d5e14 2351 .e = e,
3e9d69e7
AK
2352 };
2353 unsigned i;
2354
b8aecea2 2355 if (size) {
9bf825bf 2356 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
b8aecea2 2357 }
59023ef4 2358 memory_region_transaction_begin();
3e9d69e7 2359 for (i = 0; i < mr->ioeventfd_nb; ++i) {
73bb753d 2360 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
3e9d69e7
AK
2361 break;
2362 }
2363 }
2364 assert(i != mr->ioeventfd_nb);
2365 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2366 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2367 --mr->ioeventfd_nb;
7267c094 2368 mr->ioeventfds = g_realloc(mr->ioeventfds,
3e9d69e7 2369 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
4dc56152 2370 ioeventfd_update_pending |= mr->enabled;
59023ef4 2371 memory_region_transaction_commit();
3e9d69e7
AK
2372}
2373
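/*
 * Illustrative sketch (not part of memory.c): wiring a virtio-style
 * doorbell register to an EventNotifier so KVM can complete the write
 * without a userspace exit; MyDevState and DOORBELL_OFFSET are made
 * up.
 */
static void my_dev_enable_doorbell(MyDevState *s)
{
    if (event_notifier_init(&s->doorbell, 0) < 0) {
        return; /* fall back to ordinary MMIO dispatch */
    }
    /* any 4-byte write at the offset signals it (match_data == false) */
    memory_region_add_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
                              false, 0, &s->doorbell);
}

static void my_dev_disable_doorbell(MyDevState *s)
{
    memory_region_del_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
                              false, 0, &s->doorbell);
    event_notifier_cleanup(&s->doorbell);
}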
feca4ac1 2374static void memory_region_update_container_subregions(MemoryRegion *subregion)
093bc2cd 2375{
feca4ac1 2376 MemoryRegion *mr = subregion->container;
093bc2cd
AK
2377 MemoryRegion *other;
2378
59023ef4
JK
2379 memory_region_transaction_begin();
2380
dfde4e6e 2381 memory_region_ref(subregion);
093bc2cd
AK
2382 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2383 if (subregion->priority >= other->priority) {
2384 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2385 goto done;
2386 }
2387 }
2388 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2389done:
22bde714 2390 memory_region_update_pending |= mr->enabled && subregion->enabled;
59023ef4 2391 memory_region_transaction_commit();
093bc2cd
AK
2392}
2393
0598701a
PC
2394static void memory_region_add_subregion_common(MemoryRegion *mr,
2395 hwaddr offset,
2396 MemoryRegion *subregion)
2397{
feca4ac1
PB
2398 assert(!subregion->container);
2399 subregion->container = mr;
0598701a 2400 subregion->addr = offset;
feca4ac1 2401 memory_region_update_container_subregions(subregion);
0598701a 2402}
093bc2cd
AK
2403
2404void memory_region_add_subregion(MemoryRegion *mr,
a8170e5e 2405 hwaddr offset,
093bc2cd
AK
2406 MemoryRegion *subregion)
2407{
093bc2cd
AK
2408 subregion->priority = 0;
2409 memory_region_add_subregion_common(mr, offset, subregion);
2410}
2411
2412void memory_region_add_subregion_overlap(MemoryRegion *mr,
a8170e5e 2413 hwaddr offset,
093bc2cd 2414 MemoryRegion *subregion,
a1ff8ae0 2415 int priority)
093bc2cd 2416{
093bc2cd
AK
2417 subregion->priority = priority;
2418 memory_region_add_subregion_common(mr, offset, subregion);
2419}
2420
2421void memory_region_del_subregion(MemoryRegion *mr,
2422 MemoryRegion *subregion)
2423{
59023ef4 2424 memory_region_transaction_begin();
feca4ac1
PB
2425 assert(subregion->container == mr);
2426 subregion->container = NULL;
093bc2cd 2427 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
dfde4e6e 2428 memory_region_unref(subregion);
22bde714 2429 memory_region_update_pending |= mr->enabled && subregion->enabled;
59023ef4 2430 memory_region_transaction_commit();
6bba19ba
AK
2431}
2432
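/*
 * Illustrative sketch (not part of memory.c): building a small region
 * hierarchy.  Where the overlapping window intersects the plain
 * subregion, the window wins because its priority is higher; all
 * names and offsets are hypothetical.
 */
static void my_dev_map_regions(MyDevState *s)
{
    memory_region_init(&s->container, OBJECT(s), "my-container", 0x10000);
    memory_region_add_subregion(&s->container, 0x0000, &s->regs);
    memory_region_add_subregion_overlap(&s->container, 0x8000,
                                        &s->window, 1 /* priority */);
}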
2433void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2434{
2435 if (enabled == mr->enabled) {
2436 return;
2437 }
59023ef4 2438 memory_region_transaction_begin();
6bba19ba 2439 mr->enabled = enabled;
22bde714 2440 memory_region_update_pending = true;
59023ef4 2441 memory_region_transaction_commit();
093bc2cd 2442}
1c0ffa58 2443
e7af4c67
MT
2444void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2445{
2446 Int128 s = int128_make64(size);
2447
2448 if (size == UINT64_MAX) {
2449 s = int128_2_64();
2450 }
2451 if (int128_eq(s, mr->size)) {
2452 return;
2453 }
2454 memory_region_transaction_begin();
2455 mr->size = s;
2456 memory_region_update_pending = true;
2457 memory_region_transaction_commit();
2458}
2459
67891b8a 2460static void memory_region_readd_subregion(MemoryRegion *mr)
2282e1af 2461{
feca4ac1 2462 MemoryRegion *container = mr->container;
2282e1af 2463
feca4ac1 2464 if (container) {
67891b8a
PC
2465 memory_region_transaction_begin();
2466 memory_region_ref(mr);
feca4ac1
PB
2467 memory_region_del_subregion(container, mr);
2468 mr->container = container;
2469 memory_region_update_container_subregions(mr);
67891b8a
PC
2470 memory_region_unref(mr);
2471 memory_region_transaction_commit();
2282e1af 2472 }
67891b8a 2473}
2282e1af 2474
67891b8a
PC
2475void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2476{
2477 if (addr != mr->addr) {
2478 mr->addr = addr;
2479 memory_region_readd_subregion(mr);
2480 }
2282e1af
AK
2481}
2482
a8170e5e 2483void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
4703359e 2484{
4703359e 2485 assert(mr->alias);
4703359e 2486
59023ef4 2487 if (offset == mr->alias_offset) {
4703359e
AK
2488 return;
2489 }
2490
59023ef4
JK
2491 memory_region_transaction_begin();
2492 mr->alias_offset = offset;
22bde714 2493 memory_region_update_pending |= mr->enabled;
59023ef4 2494 memory_region_transaction_commit();
4703359e
AK
2495}
2496
a2b257d6
IM
2497uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2498{
2499 return mr->align;
2500}
2501
e2177955
AK
2502static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2503{
2504 const AddrRange *addr = addr_;
2505 const FlatRange *fr = fr_;
2506
2507 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2508 return -1;
2509 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2510 return 1;
2511 }
2512 return 0;
2513}
2514
99e86347 2515static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
e2177955 2516{
99e86347 2517 return bsearch(&addr, view->ranges, view->nr,
e2177955
AK
2518 sizeof(FlatRange), cmp_flatrange_addr);
2519}
2520
eed2bacf
IM
2521bool memory_region_is_mapped(MemoryRegion *mr)
2522{
2523 return mr->container ? true : false;
2524}
2525
c6742b14
PB
2526/* Same as memory_region_find, but it does not add a reference to the
2527 * returned region. It must be called from an RCU critical section.
2528 */
2529static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2530 hwaddr addr, uint64_t size)
e2177955 2531{
052e87b0 2532 MemoryRegionSection ret = { .mr = NULL };
73034e9e
PB
2533 MemoryRegion *root;
2534 AddressSpace *as;
2535 AddrRange range;
99e86347 2536 FlatView *view;
73034e9e
PB
2537 FlatRange *fr;
2538
2539 addr += mr->addr;
feca4ac1
PB
2540 for (root = mr; root->container; ) {
2541 root = root->container;
73034e9e
PB
2542 addr += root->addr;
2543 }
e2177955 2544
73034e9e 2545 as = memory_region_to_address_space(root);
eed2bacf
IM
2546 if (!as) {
2547 return ret;
2548 }
73034e9e 2549 range = addrrange_make(int128_make64(addr), int128_make64(size));
99e86347 2550
16620684 2551 view = address_space_to_flatview(as);
99e86347 2552 fr = flatview_lookup(view, range);
e2177955 2553 if (!fr) {
c6742b14 2554 return ret;
e2177955
AK
2555 }
2556
99e86347 2557 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
e2177955
AK
2558 --fr;
2559 }
2560
2561 ret.mr = fr->mr;
16620684 2562 ret.fv = view;
e2177955
AK
2563 range = addrrange_intersection(range, fr->addr);
2564 ret.offset_within_region = fr->offset_in_region;
2565 ret.offset_within_region += int128_get64(int128_sub(range.start,
2566 fr->addr.start));
052e87b0 2567 ret.size = range.size;
e2177955 2568 ret.offset_within_address_space = int128_get64(range.start);
7a8499e8 2569 ret.readonly = fr->readonly;
c26763f8 2570 ret.nonvolatile = fr->nonvolatile;
c6742b14
PB
2571 return ret;
2572}
2573
2574MemoryRegionSection memory_region_find(MemoryRegion *mr,
2575 hwaddr addr, uint64_t size)
2576{
2577 MemoryRegionSection ret;
694ea274 2578 RCU_READ_LOCK_GUARD();
c6742b14
PB
2579 ret = memory_region_find_rcu(mr, addr, size);
2580 if (ret.mr) {
2581 memory_region_ref(ret.mr);
2582 }
e2177955
AK
2583 return ret;
2584}
2585
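/*
 * Illustrative sketch (not part of memory.c): memory_region_find()
 * returns a referenced region, so the caller must drop the reference
 * once it is done with the section; the probe function is made up.
 */
static bool my_probe_addr(hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(get_system_memory(),
                                                 addr, 4);
    bool is_ram = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return is_ram;
}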
c6742b14
PB
2586bool memory_region_present(MemoryRegion *container, hwaddr addr)
2587{
2588 MemoryRegion *mr;
2589
694ea274 2590 RCU_READ_LOCK_GUARD();
c6742b14 2591 mr = memory_region_find_rcu(container, addr, 1).mr;
c6742b14
PB
2592 return mr && mr != container;
2593}
2594
9c1f8f44 2595void memory_global_dirty_log_sync(void)
86e775c6 2596{
3ebb1817 2597 memory_region_sync_dirty_bitmap(NULL);
7664e80c
AK
2598}
2599
9458a9a1
PB
2600void memory_global_after_dirty_log_sync(void)
2601{
2602 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2603}
2604
19310760
JZ
2605static VMChangeStateEntry *vmstate_change;
2606
7664e80c
AK
2607void memory_global_dirty_log_start(void)
2608{
19310760
JZ
2609 if (vmstate_change) {
2610 qemu_del_vm_change_state_handler(vmstate_change);
2611 vmstate_change = NULL;
2612 }
2613
7664e80c 2614 global_dirty_log = true;
6f6a5ef3 2615
7376e582 2616 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
6f6a5ef3 2617
39adb536 2618 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
6f6a5ef3
PB
2619 memory_region_transaction_begin();
2620 memory_region_update_pending = true;
2621 memory_region_transaction_commit();
7664e80c
AK
2622}
2623
19310760 2624static void memory_global_dirty_log_do_stop(void)
7664e80c 2625{
7664e80c 2626 global_dirty_log = false;
6f6a5ef3 2627
39adb536 2628 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
6f6a5ef3
PB
2629 memory_region_transaction_begin();
2630 memory_region_update_pending = true;
2631 memory_region_transaction_commit();
2632
7376e582 2633 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
7664e80c
AK
2634}
2635
19310760
JZ
2636static void memory_vm_change_state_handler(void *opaque, int running,
2637 RunState state)
2638{
2639 if (running) {
2640 memory_global_dirty_log_do_stop();
2641
2642 if (vmstate_change) {
2643 qemu_del_vm_change_state_handler(vmstate_change);
2644 vmstate_change = NULL;
2645 }
2646 }
2647}
2648
2649void memory_global_dirty_log_stop(void)
2650{
2651 if (!runstate_is_running()) {
2652 if (vmstate_change) {
2653 return;
2654 }
2655 vmstate_change = qemu_add_vm_change_state_handler(
2656 memory_vm_change_state_handler, NULL);
2657 return;
2658 }
2659
2660 memory_global_dirty_log_do_stop();
2661}
2662
7664e80c
AK
2663static void listener_add_address_space(MemoryListener *listener,
2664 AddressSpace *as)
2665{
99e86347 2666 FlatView *view;
7664e80c
AK
2667 FlatRange *fr;
2668
680a4783
PB
2669 if (listener->begin) {
2670 listener->begin(listener);
2671 }
7664e80c 2672 if (global_dirty_log) {
975aefe0
AK
2673 if (listener->log_global_start) {
2674 listener->log_global_start(listener);
2675 }
7664e80c 2676 }
975aefe0 2677
856d7245 2678 view = address_space_get_flatview(as);
99e86347 2679 FOR_EACH_FLAT_RANGE(fr, view) {
279836f8
DH
2680 MemoryRegionSection section = section_from_flat_range(fr, view);
2681
975aefe0
AK
2682 if (listener->region_add) {
2683 listener->region_add(listener, &section);
2684 }
ae990e6c
DH
2685 if (fr->dirty_log_mask && listener->log_start) {
2686 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2687 }
7664e80c 2688 }
680a4783
PB
2689 if (listener->commit) {
2690 listener->commit(listener);
2691 }
856d7245 2692 flatview_unref(view);
7664e80c
AK
2693}
2694
d25836ca
PX
2695static void listener_del_address_space(MemoryListener *listener,
2696 AddressSpace *as)
2697{
2698 FlatView *view;
2699 FlatRange *fr;
2700
2701 if (listener->begin) {
2702 listener->begin(listener);
2703 }
2704 view = address_space_get_flatview(as);
2705 FOR_EACH_FLAT_RANGE(fr, view) {
2706 MemoryRegionSection section = section_from_flat_range(fr, view);
2707
2708 if (fr->dirty_log_mask && listener->log_stop) {
2709 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2710 }
2711 if (listener->region_del) {
2712 listener->region_del(listener, &section);
2713 }
2714 }
2715 if (listener->commit) {
2716 listener->commit(listener);
2717 }
2718 flatview_unref(view);
2719}
2720
d45fa784 2721void memory_listener_register(MemoryListener *listener, AddressSpace *as)
7664e80c 2722{
72e22d2f
AK
2723 MemoryListener *other = NULL;
2724
d45fa784 2725 listener->address_space = as;
72e22d2f 2726 if (QTAILQ_EMPTY(&memory_listeners)
eae3eb3e 2727 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
72e22d2f
AK
2728 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2729 } else {
2730 QTAILQ_FOREACH(other, &memory_listeners, link) {
2731 if (listener->priority < other->priority) {
2732 break;
2733 }
2734 }
2735 QTAILQ_INSERT_BEFORE(other, listener, link);
2736 }
0d673e36 2737
9a54635d 2738 if (QTAILQ_EMPTY(&as->listeners)
eae3eb3e 2739 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
9a54635d
PB
2740 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2741 } else {
2742 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2743 if (listener->priority < other->priority) {
2744 break;
2745 }
2746 }
2747 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2748 }
2749
d45fa784 2750 listener_add_address_space(listener, as);
7664e80c
AK
2751}
2752
2753void memory_listener_unregister(MemoryListener *listener)
2754{
1d8280c1
PB
2755 if (!listener->address_space) {
2756 return;
2757 }
2758
d25836ca 2759 listener_del_address_space(listener, listener->address_space);
72e22d2f 2760 QTAILQ_REMOVE(&memory_listeners, listener, link);
9a54635d 2761 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
1d8280c1 2762 listener->address_space = NULL;
86e775c6 2763}
e2177955 2764
a2166410
GK
2765void address_space_remove_listeners(AddressSpace *as)
2766{
2767 while (!QTAILQ_EMPTY(&as->listeners)) {
2768 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
2769 }
2770}
2771
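/*
 * Illustrative sketch (not part of memory.c): a minimal listener that
 * tracks regions of one address space.  Callbacks left NULL are simply
 * skipped; priority decides where the listener lands in the ordering
 * maintained above.  All names are hypothetical.
 */
static void my_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    /* record or map the new section */
}

static MemoryListener my_listener = {
    .region_add = my_region_add,
    .priority = 10,
};

static void my_setup(void)
{
    memory_listener_register(&my_listener, &address_space_memory);
}

static void my_teardown(void)
{
    memory_listener_unregister(&my_listener);
}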
7dca8043 2772void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
1c0ffa58 2773{
ac95190e 2774 memory_region_ref(root);
8786db7c 2775 as->root = root;
67ace39b 2776 as->current_map = NULL;
4c19eb72
AK
2777 as->ioeventfd_nb = 0;
2778 as->ioeventfds = NULL;
9a54635d 2779 QTAILQ_INIT(&as->listeners);
0d673e36 2780 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
7dca8043 2781 as->name = g_strdup(name ? name : "anonymous");
202fc01b
AK
2782 address_space_update_topology(as);
2783 address_space_update_ioeventfds(as);
1c0ffa58 2784}
658b2224 2785
374f2981 2786static void do_address_space_destroy(AddressSpace *as)
83f3c251 2787{
9a54635d 2788 assert(QTAILQ_EMPTY(&as->listeners));
078c44f4 2789
856d7245 2790 flatview_unref(as->current_map);
7dca8043 2791 g_free(as->name);
4c19eb72 2792 g_free(as->ioeventfds);
ac95190e 2793 memory_region_unref(as->root);
83f3c251
AK
2794}
2795
374f2981
PB
2796void address_space_destroy(AddressSpace *as)
2797{
ac95190e
PB
2798 MemoryRegion *root = as->root;
2799
374f2981
PB
2800 /* Flush out anything from MemoryListeners listening in on this */
2801 memory_region_transaction_begin();
2802 as->root = NULL;
2803 memory_region_transaction_commit();
2804 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2805
2806 /* At this point, as->dispatch and as->current_map are dummy
2807 * entries that the guest should never use. Wait for the old
2808 * values to expire before freeing the data.
2809 */
ac95190e 2810 as->root = root;
374f2981
PB
2811 call_rcu(as, do_address_space_destroy, rcu);
2812}
2813
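/*
 * Illustrative sketch (not part of memory.c): a bus master that needs
 * its own view of memory creates a private AddressSpace over a root
 * region; the root can simply alias system memory.  Names are made up.
 */
static void my_dev_create_dma_as(MyDevState *s)
{
    memory_region_init_alias(&s->dma_root, OBJECT(s), "my-dma-root",
                             get_system_memory(), 0, UINT64_MAX);
    address_space_init(&s->dma_as, &s->dma_root, "my-dma");
}

static void my_dev_destroy_dma_as(MyDevState *s)
{
    address_space_destroy(&s->dma_as);  /* freed after an RCU grace period */
}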
4e831901
PX
2814static const char *memory_region_type(MemoryRegion *mr)
2815{
2816 if (memory_region_is_ram_device(mr)) {
2817 return "ramd";
2818 } else if (memory_region_is_romd(mr)) {
2819 return "romd";
2820 } else if (memory_region_is_rom(mr)) {
2821 return "rom";
2822 } else if (memory_region_is_ram(mr)) {
2823 return "ram";
2824 } else {
2825 return "i/o";
2826 }
2827}
2828
314e2987
BS
2829typedef struct MemoryRegionList MemoryRegionList;
2830
2831struct MemoryRegionList {
2832 const MemoryRegion *mr;
a16878d2 2833 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
314e2987
BS
2834};
2835
b58deb34 2836typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
314e2987 2837
4e831901
PX
2838#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2839 int128_sub((size), int128_one())) : 0)
2840#define MTREE_INDENT " "
2841
b6b71cb5 2842static void mtree_expand_owner(const char *label, Object *obj)
fc051ae6
AK
2843{
2844 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2845
b6b71cb5 2846 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
fc051ae6 2847 if (dev && dev->id) {
b6b71cb5 2848 qemu_printf(" id=%s", dev->id);
fc051ae6
AK
2849 } else {
2850 gchar *canonical_path = object_get_canonical_path(obj);
2851 if (canonical_path) {
b6b71cb5 2852 qemu_printf(" path=%s", canonical_path);
fc051ae6
AK
2853 g_free(canonical_path);
2854 } else {
b6b71cb5 2855 qemu_printf(" type=%s", object_get_typename(obj));
fc051ae6
AK
2856 }
2857 }
b6b71cb5 2858 qemu_printf("}");
fc051ae6
AK
2859}
2860
b6b71cb5 2861static void mtree_print_mr_owner(const MemoryRegion *mr)
fc051ae6
AK
2862{
2863 Object *owner = mr->owner;
2864 Object *parent = memory_region_owner((MemoryRegion *)mr);
2865
2866 if (!owner && !parent) {
b6b71cb5 2867 qemu_printf(" orphan");
fc051ae6
AK
2868 return;
2869 }
2870 if (owner) {
b6b71cb5 2871 mtree_expand_owner("owner", owner);
fc051ae6
AK
2872 }
2873 if (parent && parent != owner) {
b6b71cb5 2874 mtree_expand_owner("parent", parent);
fc051ae6
AK
2875 }
2876}
2877
b6b71cb5 2878static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
a8170e5e 2879 hwaddr base,
fc051ae6
AK
2880 MemoryRegionListHead *alias_print_queue,
2881 bool owner)
314e2987 2882{
9479c57a
JK
2883 MemoryRegionList *new_ml, *ml, *next_ml;
2884 MemoryRegionListHead submr_print_queue;
314e2987
BS
2885 const MemoryRegion *submr;
2886 unsigned int i;
b31f8412 2887 hwaddr cur_start, cur_end;
314e2987 2888
f8a9f720 2889 if (!mr) {
314e2987
BS
2890 return;
2891 }
2892
2893 for (i = 0; i < level; i++) {
b6b71cb5 2894 qemu_printf(MTREE_INDENT);
314e2987
BS
2895 }
2896
b31f8412
PX
2897 cur_start = base + mr->addr;
2898 cur_end = cur_start + MR_SIZE(mr->size);
2899
2900 /*
2901 * Try to detect overflow of the memory region. This should never
2902 * happen normally; when it does, print a marker to warn whoever
2903 * is inspecting the output.
2904 */
2905 if (cur_start < base || cur_end < cur_start) {
b6b71cb5 2906 qemu_printf("[DETECTED OVERFLOW!] ");
b31f8412
PX
2907 }
2908
314e2987
BS
2909 if (mr->alias) {
2910 MemoryRegionList *ml;
2911 bool found = false;
2912
2913 /* check if the alias is already in the queue */
a16878d2 2914 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
f54bb15f 2915 if (ml->mr == mr->alias) {
314e2987
BS
2916 found = true;
2917 }
2918 }
2919
2920 if (!found) {
2921 ml = g_new(MemoryRegionList, 1);
2922 ml->mr = mr->alias;
a16878d2 2923 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
314e2987 2924 }
b6b71cb5
MA
2925 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2926 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
2927 "-" TARGET_FMT_plx "%s",
2928 cur_start, cur_end,
2929 mr->priority,
2930 mr->nonvolatile ? "nv-" : "",
2931 memory_region_type((MemoryRegion *)mr),
2932 memory_region_name(mr),
2933 memory_region_name(mr->alias),
2934 mr->alias_offset,
2935 mr->alias_offset + MR_SIZE(mr->size),
2936 mr->enabled ? "" : " [disabled]");
fc051ae6 2937 if (owner) {
b6b71cb5 2938 mtree_print_mr_owner(mr);
fc051ae6 2939 }
314e2987 2940 } else {
b6b71cb5
MA
2941 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2942 " (prio %d, %s%s): %s%s",
2943 cur_start, cur_end,
2944 mr->priority,
2945 mr->nonvolatile ? "nv-" : "",
2946 memory_region_type((MemoryRegion *)mr),
2947 memory_region_name(mr),
2948 mr->enabled ? "" : " [disabled]");
fc051ae6 2949 if (owner) {
b6b71cb5 2950 mtree_print_mr_owner(mr);
fc051ae6 2951 }
314e2987 2952 }
b6b71cb5 2953 qemu_printf("\n");
9479c57a
JK
2954
2955 QTAILQ_INIT(&submr_print_queue);
2956
314e2987 2957 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
9479c57a
JK
2958 new_ml = g_new(MemoryRegionList, 1);
2959 new_ml->mr = submr;
a16878d2 2960 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
9479c57a
JK
2961 if (new_ml->mr->addr < ml->mr->addr ||
2962 (new_ml->mr->addr == ml->mr->addr &&
2963 new_ml->mr->priority > ml->mr->priority)) {
a16878d2 2964 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
9479c57a
JK
2965 new_ml = NULL;
2966 break;
2967 }
2968 }
2969 if (new_ml) {
a16878d2 2970 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
9479c57a
JK
2971 }
2972 }
2973
a16878d2 2974 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
b6b71cb5 2975 mtree_print_mr(ml->mr, level + 1, cur_start,
fc051ae6 2976 alias_print_queue, owner);
9479c57a
JK
2977 }
2978
a16878d2 2979 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
9479c57a 2980 g_free(ml);
314e2987
BS
2981 }
2982}
2983
5e8fd947 2984struct FlatViewInfo {
5e8fd947
AK
2985 int counter;
2986 bool dispatch_tree;
fc051ae6 2987 bool owner;
8072aae3
AK
2988 AccelClass *ac;
2989 const char *ac_name;
5e8fd947
AK
2990};
2991
2992static void mtree_print_flatview(gpointer key, gpointer value,
2993 gpointer user_data)
57bb40c9 2994{
5e8fd947
AK
2995 FlatView *view = key;
2996 GArray *fv_address_spaces = value;
2997 struct FlatViewInfo *fvi = user_data;
57bb40c9
PX
2998 FlatRange *range = &view->ranges[0];
2999 MemoryRegion *mr;
3000 int n = view->nr;
5e8fd947
AK
3001 int i;
3002 AddressSpace *as;
3003
b6b71cb5 3004 qemu_printf("FlatView #%d\n", fvi->counter);
5e8fd947
AK
3005 ++fvi->counter;
3006
3007 for (i = 0; i < fv_address_spaces->len; ++i) {
3008 as = g_array_index(fv_address_spaces, AddressSpace*, i);
b6b71cb5
MA
3009 qemu_printf(" AS \"%s\", root: %s",
3010 as->name, memory_region_name(as->root));
5e8fd947 3011 if (as->root->alias) {
b6b71cb5 3012 qemu_printf(", alias %s", memory_region_name(as->root->alias));
5e8fd947 3013 }
b6b71cb5 3014 qemu_printf("\n");
5e8fd947
AK
3015 }
3016
b6b71cb5 3017 qemu_printf(" Root memory region: %s\n",
5e8fd947 3018 view->root ? memory_region_name(view->root) : "(none)");
57bb40c9
PX
3019
3020 if (n <= 0) {
b6b71cb5 3021 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
57bb40c9
PX
3022 return;
3023 }
3024
3025 while (n--) {
3026 mr = range->mr;
377a07aa 3027 if (range->offset_in_region) {
b6b71cb5
MA
3028 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3029 " (prio %d, %s%s): %s @" TARGET_FMT_plx,
3030 int128_get64(range->addr.start),
3031 int128_get64(range->addr.start)
3032 + MR_SIZE(range->addr.size),
3033 mr->priority,
3034 range->nonvolatile ? "nv-" : "",
3035 range->readonly ? "rom" : memory_region_type(mr),
3036 memory_region_name(mr),
3037 range->offset_in_region);
377a07aa 3038 } else {
b6b71cb5
MA
3039 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3040 " (prio %d, %s%s): %s",
3041 int128_get64(range->addr.start),
3042 int128_get64(range->addr.start)
3043 + MR_SIZE(range->addr.size),
3044 mr->priority,
3045 range->nonvolatile ? "nv-" : "",
3046 range->readonly ? "rom" : memory_region_type(mr),
3047 memory_region_name(mr));
377a07aa 3048 }
fc051ae6 3049 if (fvi->owner) {
b6b71cb5 3050 mtree_print_mr_owner(mr);
fc051ae6 3051 }
8072aae3
AK
3052
3053 if (fvi->ac) {
3054 for (i = 0; i < fv_address_spaces->len; ++i) {
3055 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3056 if (fvi->ac->has_memory(current_machine, as,
3057 int128_get64(range->addr.start),
3058 MR_SIZE(range->addr.size) + 1)) {
3059 qemu_printf(" %s", fvi->ac_name);
3060 }
3061 }
3062 }
b6b71cb5 3063 qemu_printf("\n");
57bb40c9
PX
3064 range++;
3065 }
3066
5e8fd947
AK
3067#if !defined(CONFIG_USER_ONLY)
3068 if (fvi->dispatch_tree && view->root) {
b6b71cb5 3069 mtree_print_dispatch(view->dispatch, view->root);
5e8fd947
AK
3070 }
3071#endif
3072
b6b71cb5 3073 qemu_printf("\n");
5e8fd947
AK
3074}
3075
3076static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3077 gpointer user_data)
3078{
3079 FlatView *view = key;
3080 GArray *fv_address_spaces = value;
3081
3082 g_array_unref(fv_address_spaces);
57bb40c9 3083 flatview_unref(view);
5e8fd947
AK
3084
3085 return true;
57bb40c9
PX
3086}
3087
b6b71cb5 3088void mtree_info(bool flatview, bool dispatch_tree, bool owner)
314e2987
BS
3089{
3090 MemoryRegionListHead ml_head;
3091 MemoryRegionList *ml, *ml2;
0d673e36 3092 AddressSpace *as;
314e2987 3093
57bb40c9 3094 if (flatview) {
5e8fd947
AK
3095 FlatView *view;
3096 struct FlatViewInfo fvi = {
5e8fd947 3097 .counter = 0,
fc051ae6
AK
3098 .dispatch_tree = dispatch_tree,
3099 .owner = owner,
5e8fd947
AK
3100 };
3101 GArray *fv_address_spaces;
3102 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
8072aae3
AK
3103 AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);
3104
3105 if (ac->has_memory) {
3106 fvi.ac = ac;
3107 fvi.ac_name = current_machine->accel ? current_machine->accel :
3108 object_class_get_name(OBJECT_CLASS(ac));
3109 }
5e8fd947
AK
3110
3111 /* Gather all FVs in one table */
57bb40c9 3112 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
5e8fd947
AK
3113 view = address_space_get_flatview(as);
3114
3115 fv_address_spaces = g_hash_table_lookup(views, view);
3116 if (!fv_address_spaces) {
3117 fv_address_spaces = g_array_new(false, false, sizeof(as));
3118 g_hash_table_insert(views, view, fv_address_spaces);
3119 }
3120
3121 g_array_append_val(fv_address_spaces, as);
57bb40c9 3122 }
5e8fd947
AK
3123
3124 /* Print */
3125 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3126
3127 /* Free */
3128 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3129 g_hash_table_unref(views);
3130
57bb40c9
PX
3131 return;
3132 }
3133
314e2987
BS
3134 QTAILQ_INIT(&ml_head);
3135
0d673e36 3136 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
b6b71cb5
MA
3137 qemu_printf("address-space: %s\n", as->name);
3138 mtree_print_mr(as->root, 1, 0, &ml_head, owner);
3139 qemu_printf("\n");
b9f9be88
BS
3140 }
3141
314e2987 3142 /* print aliased regions */
a16878d2 3143 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
b6b71cb5
MA
3144 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3145 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
3146 qemu_printf("\n");
314e2987
BS
3147 }
3148
a16878d2 3149 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
88365e47 3150 g_free(ml);
314e2987 3151 }
314e2987 3152}
b4fefef9 3153
b08199c6
PM
3154void memory_region_init_ram(MemoryRegion *mr,
3155 struct Object *owner,
3156 const char *name,
3157 uint64_t size,
3158 Error **errp)
3159{
3160 DeviceState *owner_dev;
3161 Error *err = NULL;
3162
3163 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3164 if (err) {
3165 error_propagate(errp, err);
3166 return;
3167 }
3168 /* This will assert if owner is neither NULL nor a DeviceState.
3169 * We only want the owner here for the purposes of defining a
3170 * unique name for migration. TODO: Ideally we should implement
3171 * a naming scheme for Objects which are not DeviceStates, in
3172 * which case we can relax this restriction.
3173 */
3174 owner_dev = DEVICE(owner);
3175 vmstate_register_ram(mr, owner_dev);
3176}
3177
3178void memory_region_init_rom(MemoryRegion *mr,
3179 struct Object *owner,
3180 const char *name,
3181 uint64_t size,
3182 Error **errp)
3183{
3184 DeviceState *owner_dev;
3185 Error *err = NULL;
3186
3187 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3188 if (err) {
3189 error_propagate(errp, err);
3190 return;
3191 }
3192 /* This will assert if owner is neither NULL nor a DeviceState.
3193 * We only want the owner here for the purposes of defining a
3194 * unique name for migration. TODO: Ideally we should implement
3195 * a naming scheme for Objects which are not DeviceStates, in
3196 * which case we can relax this restriction.
3197 */
3198 owner_dev = DEVICE(owner);
3199 vmstate_register_ram(mr, owner_dev);
3200}
3201
3202void memory_region_init_rom_device(MemoryRegion *mr,
3203 struct Object *owner,
3204 const MemoryRegionOps *ops,
3205 void *opaque,
3206 const char *name,
3207 uint64_t size,
3208 Error **errp)
3209{
3210 DeviceState *owner_dev;
3211 Error *err = NULL;
3212
3213 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3214 name, size, &err);
3215 if (err) {
3216 error_propagate(errp, err);
3217 return;
3218 }
3219 /* This will assert if owner is neither NULL nor a DeviceState.
3220 * We only want the owner here for the purposes of defining a
3221 * unique name for migration. TODO: Ideally we should implement
3222 * a naming scheme for Objects which are not DeviceStates, in
3223 * which case we can relax this restriction.
3224 */
3225 owner_dev = DEVICE(owner);
3226 vmstate_register_ram(mr, owner_dev);
3227}
3228
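/*
 * Illustrative sketch (not part of memory.c): the migratable wrappers
 * above want a DeviceState (or NULL) owner, so a realize function
 * would typically use them like this; MyDevState, MY_DEV and the base
 * address are hypothetical.
 */
static void my_dev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = MY_DEV(dev);
    Error *err = NULL;

    memory_region_init_ram(&s->ram, OBJECT(dev), "my-dev.ram",
                           0x10000, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    memory_region_add_subregion(get_system_memory(), 0x10000000, &s->ram);
}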
b4fefef9
PC
3229static const TypeInfo memory_region_info = {
3230 .parent = TYPE_OBJECT,
3231 .name = TYPE_MEMORY_REGION,
1b53ecd9 3232 .class_size = sizeof(MemoryRegionClass),
b4fefef9
PC
3233 .instance_size = sizeof(MemoryRegion),
3234 .instance_init = memory_region_initfn,
3235 .instance_finalize = memory_region_finalize,
3236};
3237
3df9d748
AK
3238static const TypeInfo iommu_memory_region_info = {
3239 .parent = TYPE_MEMORY_REGION,
3240 .name = TYPE_IOMMU_MEMORY_REGION,
1221a474 3241 .class_size = sizeof(IOMMUMemoryRegionClass),
3df9d748
AK
3242 .instance_size = sizeof(IOMMUMemoryRegion),
3243 .instance_init = iommu_memory_region_initfn,
1221a474 3244 .abstract = true,
3df9d748
AK
3245};
3246
b4fefef9
PC
3247static void memory_register_types(void)
3248{
3249 type_register_static(&memory_region_info);
3df9d748 3250 type_register_static(&iommu_memory_region_info);
b4fefef9
PC
3251}
3252
3253type_init(memory_register_types)