/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

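/*
 * Example (illustrative values): intersecting [0x1000, 0x1000 + 0x3000)
 * with [0x2000, 0x2000 + 0x4000) yields start = max(0x1000, 0x2000) = 0x2000
 * and end = min(0x4000, 0x6000) = 0x4000, i.e. a range of 0x2000 bytes
 * starting at 0x2000.
 */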
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

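/*
 * For example, MEMORY_LISTENER_CALL_GLOBAL(begin, Forward) invokes the
 * ->begin() callback of every registered MemoryListener, walking the
 * global list front to back, while the per-address-space variant walks
 * only that AddressSpace's listeners and additionally passes the
 * MemoryRegionSection being updated.
 */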
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

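/*
 * memory_region_ioeventfd_before() compares (addr.start, addr.size,
 * match_data, data, e) in that order; two entries are considered equal
 * exactly when neither orders before the other, which is what
 * memory_region_ioeventfd_equal() checks.
 */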
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    int has_coalesced_range;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

093bc2cd
AK
237static bool flatrange_equal(FlatRange *a, FlatRange *b)
238{
239 return a->mr == b->mr
240 && addrrange_equal(a->addr, b->addr)
d0a9b5bc 241 && a->offset_in_region == b->offset_in_region
b138e654 242 && a->romd_mode == b->romd_mode
c26763f8
MAL
243 && a->readonly == b->readonly
244 && a->nonvolatile == b->nonvolatile;
093bc2cd
AK
245}
246
89c177bb 247static FlatView *flatview_new(MemoryRegion *mr_root)
093bc2cd 248{
cc94cd6d
AK
249 FlatView *view;
250
251 view = g_new0(FlatView, 1);
856d7245 252 view->ref = 1;
89c177bb
AK
253 view->root = mr_root;
254 memory_region_ref(mr_root);
02d9651d 255 trace_flatview_new(view, mr_root);
cc94cd6d
AK
256
257 return view;
093bc2cd
AK
258}
259
260/* Insert a range into a given position. Caller is responsible for maintaining
261 * sorting order.
262 */
263static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
264{
265 if (view->nr == view->nr_allocated) {
266 view->nr_allocated = MAX(2 * view->nr, 10);
7267c094 267 view->ranges = g_realloc(view->ranges,
093bc2cd
AK
268 view->nr_allocated * sizeof(*view->ranges));
269 }
270 memmove(view->ranges + pos + 1, view->ranges + pos,
271 (view->nr - pos) * sizeof(FlatRange));
272 view->ranges[pos] = *range;
dfde4e6e 273 memory_region_ref(range->mr);
093bc2cd
AK
274 ++view->nr;
275}
276
277static void flatview_destroy(FlatView *view)
278{
dfde4e6e
PB
279 int i;
280
02d9651d 281 trace_flatview_destroy(view, view->root);
66a6df1d
AK
282 if (view->dispatch) {
283 address_space_dispatch_free(view->dispatch);
284 }
dfde4e6e
PB
285 for (i = 0; i < view->nr; i++) {
286 memory_region_unref(view->ranges[i].mr);
287 }
7267c094 288 g_free(view->ranges);
89c177bb 289 memory_region_unref(view->root);
a9a0c06d 290 g_free(view);
093bc2cd
AK
291}
292
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

3d8e6bf9
AK
307static bool can_merge(FlatRange *r1, FlatRange *r2)
308{
08dafab4 309 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
3d8e6bf9 310 && r1->mr == r2->mr
08dafab4
AK
311 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
312 r1->addr.size),
313 int128_make64(r2->offset_in_region))
d0a9b5bc 314 && r1->dirty_log_mask == r2->dirty_log_mask
b138e654 315 && r1->romd_mode == r2->romd_mode
c26763f8
MAL
316 && r1->readonly == r2->readonly
317 && r1->nonvolatile == r2->nonvolatile;
3d8e6bf9
AK
318}
319
8508e024 320/* Attempt to simplify a view by merging adjacent ranges */
3d8e6bf9
AK
321static void flatview_simplify(FlatView *view)
322{
323 unsigned i, j;
324
325 i = 0;
326 while (i < view->nr) {
327 j = i + 1;
328 while (j < view->nr
329 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
08dafab4 330 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
3d8e6bf9
AK
331 ++j;
332 }
333 ++i;
334 memmove(&view->ranges[i], &view->ranges[j],
335 (view->nr - j) * sizeof(view->ranges[j]));
336 view->nr -= j - i;
337 }
338}
339
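/*
 * Example (illustrative values): two adjacent FlatRanges covering
 * [0x1000, 0x2000) and [0x2000, 0x3000) of the same MemoryRegion, with
 * contiguous offsets and identical attributes, are merged into a single
 * range covering [0x1000, 0x3000).
 */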
e7342aa3
PB
340static bool memory_region_big_endian(MemoryRegion *mr)
341{
342#ifdef TARGET_WORDS_BIGENDIAN
343 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
344#else
345 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
346#endif
347}
348
e11ef3d1
PB
349static bool memory_region_wrong_endianness(MemoryRegion *mr)
350{
351#ifdef TARGET_WORDS_BIGENDIAN
352 return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
353#else
354 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
355#endif
356}
357
358static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
359{
360 if (memory_region_wrong_endianness(mr)) {
361 switch (size) {
362 case 1:
363 break;
364 case 2:
365 *data = bswap16(*data);
366 break;
367 case 4:
368 *data = bswap32(*data);
369 break;
370 case 8:
371 *data = bswap64(*data);
372 break;
373 default:
374 abort();
375 }
376 }
377}
378
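/*
 * For example, a 4-byte value read from a DEVICE_LITTLE_ENDIAN region on a
 * big-endian target has the "wrong" byte order for the target, so
 * adjust_endianness() swaps it with bswap32() before it reaches the caller.
 */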
3c754a93 379static inline void memory_region_shift_read_access(uint64_t *value,
98f52cdb 380 signed shift,
3c754a93
PMD
381 uint64_t mask,
382 uint64_t tmp)
383{
98f52cdb
PMD
384 if (shift >= 0) {
385 *value |= (tmp & mask) << shift;
386 } else {
387 *value |= (tmp & mask) >> -shift;
388 }
3c754a93
PMD
389}
390
391static inline uint64_t memory_region_shift_write_access(uint64_t *value,
98f52cdb 392 signed shift,
3c754a93
PMD
393 uint64_t mask)
394{
98f52cdb
PMD
395 uint64_t tmp;
396
397 if (shift >= 0) {
398 tmp = (*value >> shift) & mask;
399 } else {
400 tmp = (*value << -shift) & mask;
401 }
402
403 return tmp;
3c754a93
PMD
404}
405
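/*
 * For example, when an 8-byte big-endian access is split into two 4-byte
 * device accesses, the high half is extracted with shift = 32 and
 * mask = 0xffffffff, i.e. tmp = (*value >> 32) & 0xffffffff; a negative
 * shift moves the bits in the opposite direction instead.
 */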
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

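/*
 * For example (illustrative values), offset 0x10 into a region placed at
 * 0x8000 inside a container mapped at 0xe0000000 resolves to the absolute
 * address 0xe0008010.
 */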
5a68be94
HB
420static int get_cpu_index(void)
421{
422 if (current_cpu) {
423 return current_cpu->cpu_index;
424 }
425 return -1;
426}
427
cc05c43a 428static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
ce5d2f33
PB
429 hwaddr addr,
430 uint64_t *value,
431 unsigned size,
98f52cdb 432 signed shift,
cc05c43a
PM
433 uint64_t mask,
434 MemTxAttrs attrs)
ce5d2f33 435{
ce5d2f33
PB
436 uint64_t tmp;
437
cc05c43a 438 tmp = mr->ops->read(mr->opaque, addr, size);
23d92d68 439 if (mr->subpage) {
5a68be94 440 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
f2d08942
HB
441 } else if (mr == &io_mem_notdirty) {
442 /* Accesses to code which has previously been translated into a TB show
443 * up in the MMIO path, as accesses to the io_mem_notdirty
444 * MemoryRegion. */
445 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
4779dc1d
HB
446 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
447 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
5a68be94 448 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
23d92d68 449 }
3c754a93 450 memory_region_shift_read_access(value, shift, mask, tmp);
cc05c43a 451 return MEMTX_OK;
ce5d2f33
PB
452}
453
cc05c43a
PM
454static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
455 hwaddr addr,
456 uint64_t *value,
457 unsigned size,
98f52cdb 458 signed shift,
cc05c43a
PM
459 uint64_t mask,
460 MemTxAttrs attrs)
164a4dcd 461{
cc05c43a
PM
462 uint64_t tmp = 0;
463 MemTxResult r;
164a4dcd 464
cc05c43a 465 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
23d92d68 466 if (mr->subpage) {
5a68be94 467 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
f2d08942
HB
468 } else if (mr == &io_mem_notdirty) {
469 /* Accesses to code which has previously been translated into a TB show
470 * up in the MMIO path, as accesses to the io_mem_notdirty
471 * MemoryRegion. */
472 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
4779dc1d
HB
473 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
474 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
5a68be94 475 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
23d92d68 476 }
3c754a93 477 memory_region_shift_read_access(value, shift, mask, tmp);
cc05c43a 478 return r;
164a4dcd
AK
479}
480
cc05c43a
PM
481static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
482 hwaddr addr,
483 uint64_t *value,
484 unsigned size,
98f52cdb 485 signed shift,
cc05c43a
PM
486 uint64_t mask,
487 MemTxAttrs attrs)
164a4dcd 488{
3c754a93 489 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
164a4dcd 490
23d92d68 491 if (mr->subpage) {
5a68be94 492 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
f2d08942
HB
493 } else if (mr == &io_mem_notdirty) {
494 /* Accesses to code which has previously been translated into a TB show
495 * up in the MMIO path, as accesses to the io_mem_notdirty
496 * MemoryRegion. */
497 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
4779dc1d
HB
498 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
499 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
5a68be94 500 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
23d92d68 501 }
164a4dcd 502 mr->ops->write(mr->opaque, addr, tmp, size);
cc05c43a 503 return MEMTX_OK;
164a4dcd
AK
504}
505
cc05c43a
PM
506static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
507 hwaddr addr,
508 uint64_t *value,
509 unsigned size,
98f52cdb 510 signed shift,
cc05c43a
PM
511 uint64_t mask,
512 MemTxAttrs attrs)
513{
3c754a93 514 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
cc05c43a 515
23d92d68 516 if (mr->subpage) {
5a68be94 517 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
f2d08942
HB
518 } else if (mr == &io_mem_notdirty) {
519 /* Accesses to code which has previously been translated into a TB show
520 * up in the MMIO path, as accesses to the io_mem_notdirty
521 * MemoryRegion. */
522 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
4779dc1d
HB
523 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
524 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
5a68be94 525 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
23d92d68 526 }
cc05c43a
PM
527 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
528}
529
530static MemTxResult access_with_adjusted_size(hwaddr addr,
164a4dcd
AK
531 uint64_t *value,
532 unsigned size,
533 unsigned access_size_min,
534 unsigned access_size_max,
05e015f7
KF
535 MemTxResult (*access_fn)
536 (MemoryRegion *mr,
537 hwaddr addr,
538 uint64_t *value,
539 unsigned size,
98f52cdb 540 signed shift,
05e015f7
KF
541 uint64_t mask,
542 MemTxAttrs attrs),
cc05c43a
PM
543 MemoryRegion *mr,
544 MemTxAttrs attrs)
164a4dcd
AK
545{
546 uint64_t access_mask;
547 unsigned access_size;
548 unsigned i;
cc05c43a 549 MemTxResult r = MEMTX_OK;
164a4dcd
AK
550
551 if (!access_size_min) {
552 access_size_min = 1;
553 }
554 if (!access_size_max) {
555 access_size_max = 4;
556 }
ce5d2f33
PB
557
558 /* FIXME: support unaligned access? */
164a4dcd 559 access_size = MAX(MIN(size, access_size_max), access_size_min);
36960b4d 560 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
e7342aa3
PB
561 if (memory_region_big_endian(mr)) {
562 for (i = 0; i < size; i += access_size) {
05e015f7 563 r |= access_fn(mr, addr + i, value, access_size,
cc05c43a 564 (size - access_size - i) * 8, access_mask, attrs);
e7342aa3
PB
565 }
566 } else {
567 for (i = 0; i < size; i += access_size) {
05e015f7 568 r |= access_fn(mr, addr + i, value, access_size, i * 8,
cc05c43a 569 access_mask, attrs);
e7342aa3 570 }
164a4dcd 571 }
cc05c43a 572 return r;
164a4dcd
AK
573}
574
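/*
 * For example, an 8-byte access to a region whose implementation only
 * supports 4-byte accesses (impl.max_access_size == 4) is issued as two
 * 4-byte accesses: on a big-endian target the first carries bits 63..32
 * (shift 32) and the second bits 31..0; on a little-endian target the
 * shifts are 0 and 32 respectively.
 */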
e2177955
AK
575static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
576{
0d673e36
AK
577 AddressSpace *as;
578
feca4ac1
PB
579 while (mr->container) {
580 mr = mr->container;
e2177955 581 }
0d673e36
AK
582 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
583 if (mr == as->root) {
584 return as;
585 }
e2177955 586 }
eed2bacf 587 return NULL;
e2177955
AK
588}
589
093bc2cd
AK
590/* Render a memory region into the global view. Ranges in @view obscure
591 * ranges in @mr.
592 */
593static void render_memory_region(FlatView *view,
594 MemoryRegion *mr,
08dafab4 595 Int128 base,
fb1cd6f9 596 AddrRange clip,
c26763f8
MAL
597 bool readonly,
598 bool nonvolatile)
093bc2cd
AK
599{
600 MemoryRegion *subregion;
601 unsigned i;
a8170e5e 602 hwaddr offset_in_region;
08dafab4
AK
603 Int128 remain;
604 Int128 now;
093bc2cd
AK
605 FlatRange fr;
606 AddrRange tmp;
607
6bba19ba
AK
608 if (!mr->enabled) {
609 return;
610 }
611
08dafab4 612 int128_addto(&base, int128_make64(mr->addr));
fb1cd6f9 613 readonly |= mr->readonly;
c26763f8 614 nonvolatile |= mr->nonvolatile;
093bc2cd
AK
615
616 tmp = addrrange_make(base, mr->size);
617
618 if (!addrrange_intersects(tmp, clip)) {
619 return;
620 }
621
622 clip = addrrange_intersection(tmp, clip);
623
624 if (mr->alias) {
08dafab4
AK
625 int128_subfrom(&base, int128_make64(mr->alias->addr));
626 int128_subfrom(&base, int128_make64(mr->alias_offset));
c26763f8
MAL
627 render_memory_region(view, mr->alias, base, clip,
628 readonly, nonvolatile);
093bc2cd
AK
629 return;
630 }
631
632 /* Render subregions in priority order. */
633 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
c26763f8
MAL
634 render_memory_region(view, subregion, base, clip,
635 readonly, nonvolatile);
093bc2cd
AK
636 }
637
14a3c10a 638 if (!mr->terminates) {
093bc2cd
AK
639 return;
640 }
641
08dafab4 642 offset_in_region = int128_get64(int128_sub(clip.start, base));
093bc2cd
AK
643 base = clip.start;
644 remain = clip.size;
645
2eb74e1a 646 fr.mr = mr;
6f6a5ef3 647 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
b138e654 648 fr.romd_mode = mr->romd_mode;
2eb74e1a 649 fr.readonly = readonly;
c26763f8 650 fr.nonvolatile = nonvolatile;
3ac7d43a 651 fr.has_coalesced_range = 0;
2eb74e1a 652
093bc2cd 653 /* Render the region itself into any gaps left by the current view. */
08dafab4
AK
654 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
655 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
093bc2cd
AK
656 continue;
657 }
08dafab4
AK
658 if (int128_lt(base, view->ranges[i].addr.start)) {
659 now = int128_min(remain,
660 int128_sub(view->ranges[i].addr.start, base));
093bc2cd
AK
661 fr.offset_in_region = offset_in_region;
662 fr.addr = addrrange_make(base, now);
663 flatview_insert(view, i, &fr);
664 ++i;
08dafab4
AK
665 int128_addto(&base, now);
666 offset_in_region += int128_get64(now);
667 int128_subfrom(&remain, now);
093bc2cd 668 }
d26a8cae
AK
669 now = int128_sub(int128_min(int128_add(base, remain),
670 addrrange_end(view->ranges[i].addr)),
671 base);
672 int128_addto(&base, now);
673 offset_in_region += int128_get64(now);
674 int128_subfrom(&remain, now);
093bc2cd 675 }
08dafab4 676 if (int128_nz(remain)) {
093bc2cd
AK
677 fr.offset_in_region = offset_in_region;
678 fr.addr = addrrange_make(base, remain);
679 flatview_insert(view, i, &fr);
680 }
681}
682
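/*
 * Example (illustrative values): a 4 KiB terminating subregion at offset
 * 0x1000 inside a container mapped at 0x10000000 is rendered as a FlatRange
 * covering [0x10001000, 0x10002000), minus any part of that window already
 * claimed by a higher-priority sibling rendered earlier.
 */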
89c177bb
AK
683static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
684{
e673ba9a
PB
685 while (mr->enabled) {
686 if (mr->alias) {
687 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
688 /* The alias is included in its entirety. Use it as
689 * the "real" root, so that we can share more FlatViews.
690 */
691 mr = mr->alias;
692 continue;
693 }
694 } else if (!mr->terminates) {
695 unsigned int found = 0;
696 MemoryRegion *child, *next = NULL;
697 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
698 if (child->enabled) {
699 if (++found > 1) {
700 next = NULL;
701 break;
702 }
703 if (!child->addr && int128_ge(mr->size, child->size)) {
704 /* A child is included in its entirety. If it's the only
705 * enabled one, use it in the hope of finding an alias down the
706 * way. This will also let us share FlatViews.
707 */
708 next = child;
709 }
710 }
711 }
092aa2fc
AK
712 if (found == 0) {
713 return NULL;
714 }
e673ba9a
PB
715 if (next) {
716 mr = next;
717 continue;
718 }
719 }
720
092aa2fc 721 return mr;
89c177bb
AK
722 }
723
092aa2fc 724 return NULL;
89c177bb
AK
725}
726
093bc2cd 727/* Render a memory topology into a list of disjoint absolute ranges. */
a9a0c06d 728static FlatView *generate_memory_topology(MemoryRegion *mr)
093bc2cd 729{
9bf561e3 730 int i;
a9a0c06d 731 FlatView *view;
093bc2cd 732
89c177bb 733 view = flatview_new(mr);
093bc2cd 734
83f3c251 735 if (mr) {
a9a0c06d 736 render_memory_region(view, mr, int128_zero(),
c26763f8
MAL
737 addrrange_make(int128_zero(), int128_2_64()),
738 false, false);
83f3c251 739 }
a9a0c06d 740 flatview_simplify(view);
093bc2cd 741
9bf561e3
AK
742 view->dispatch = address_space_dispatch_new(view);
743 for (i = 0; i < view->nr; i++) {
744 MemoryRegionSection mrs =
745 section_from_flat_range(&view->ranges[i], view);
746 flatview_add_to_dispatch(view, &mrs);
747 }
748 address_space_dispatch_compact(view->dispatch);
967dc9b1 749 g_hash_table_replace(flat_views, mr, view);
9bf561e3 750
093bc2cd
AK
751 return view;
752}
753
3e9d69e7
AK
754static void address_space_add_del_ioeventfds(AddressSpace *as,
755 MemoryRegionIoeventfd *fds_new,
756 unsigned fds_new_nb,
757 MemoryRegionIoeventfd *fds_old,
758 unsigned fds_old_nb)
759{
760 unsigned iold, inew;
80a1ea37
AK
761 MemoryRegionIoeventfd *fd;
762 MemoryRegionSection section;
3e9d69e7
AK
763
764 /* Generate a symmetric difference of the old and new fd sets, adding
765 * and deleting as necessary.
766 */
767
768 iold = inew = 0;
769 while (iold < fds_old_nb || inew < fds_new_nb) {
770 if (iold < fds_old_nb
771 && (inew == fds_new_nb
73bb753d
TB
772 || memory_region_ioeventfd_before(&fds_old[iold],
773 &fds_new[inew]))) {
80a1ea37
AK
774 fd = &fds_old[iold];
775 section = (MemoryRegionSection) {
16620684 776 .fv = address_space_to_flatview(as),
80a1ea37 777 .offset_within_address_space = int128_get64(fd->addr.start),
052e87b0 778 .size = fd->addr.size,
80a1ea37 779 };
9a54635d 780 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
753d5e14 781 fd->match_data, fd->data, fd->e);
3e9d69e7
AK
782 ++iold;
783 } else if (inew < fds_new_nb
784 && (iold == fds_old_nb
73bb753d
TB
785 || memory_region_ioeventfd_before(&fds_new[inew],
786 &fds_old[iold]))) {
80a1ea37
AK
787 fd = &fds_new[inew];
788 section = (MemoryRegionSection) {
16620684 789 .fv = address_space_to_flatview(as),
80a1ea37 790 .offset_within_address_space = int128_get64(fd->addr.start),
052e87b0 791 .size = fd->addr.size,
80a1ea37 792 };
9a54635d 793 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
753d5e14 794 fd->match_data, fd->data, fd->e);
3e9d69e7
AK
795 ++inew;
796 } else {
797 ++iold;
798 ++inew;
799 }
800 }
801}
802
48564041 803FlatView *address_space_get_flatview(AddressSpace *as)
856d7245
PB
804{
805 FlatView *view;
806
374f2981 807 rcu_read_lock();
447b0d0b 808 do {
16620684 809 view = address_space_to_flatview(as);
447b0d0b
PB
810 /* If somebody has replaced as->current_map concurrently,
811 * flatview_ref returns false.
812 */
813 } while (!flatview_ref(view));
374f2981 814 rcu_read_unlock();
856d7245
PB
815 return view;
816}
817
3e9d69e7
AK
818static void address_space_update_ioeventfds(AddressSpace *as)
819{
99e86347 820 FlatView *view;
3e9d69e7
AK
821 FlatRange *fr;
822 unsigned ioeventfd_nb = 0;
823 MemoryRegionIoeventfd *ioeventfds = NULL;
824 AddrRange tmp;
825 unsigned i;
826
856d7245 827 view = address_space_get_flatview(as);
99e86347 828 FOR_EACH_FLAT_RANGE(fr, view) {
3e9d69e7
AK
829 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
830 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
08dafab4
AK
831 int128_sub(fr->addr.start,
832 int128_make64(fr->offset_in_region)));
3e9d69e7
AK
833 if (addrrange_intersects(fr->addr, tmp)) {
834 ++ioeventfd_nb;
7267c094 835 ioeventfds = g_realloc(ioeventfds,
3e9d69e7
AK
836 ioeventfd_nb * sizeof(*ioeventfds));
837 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
838 ioeventfds[ioeventfd_nb-1].addr = tmp;
839 }
840 }
841 }
842
843 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
844 as->ioeventfds, as->ioeventfd_nb);
845
7267c094 846 g_free(as->ioeventfds);
3e9d69e7
AK
847 as->ioeventfds = ioeventfds;
848 as->ioeventfd_nb = ioeventfd_nb;
856d7245 849 flatview_unref(view);
3e9d69e7
AK
850}
851
909bf763
PB
852static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
853{
1f7af804
PB
854 if (!fr->has_coalesced_range) {
855 return;
856 }
857
3ac7d43a
PB
858 if (--fr->has_coalesced_range > 0) {
859 return;
860 }
861
909bf763
PB
862 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
863 int128_get64(fr->addr.start),
864 int128_get64(fr->addr.size));
865}
866
867static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
868{
869 MemoryRegion *mr = fr->mr;
870 CoalescedMemoryRange *cmr;
871 AddrRange tmp;
872
1f7af804
PB
873 if (QTAILQ_EMPTY(&mr->coalesced)) {
874 return;
875 }
876
3ac7d43a
PB
877 if (fr->has_coalesced_range++) {
878 return;
879 }
880
909bf763
PB
881 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
882 tmp = addrrange_shift(cmr->addr,
883 int128_sub(fr->addr.start,
884 int128_make64(fr->offset_in_region)));
885 if (!addrrange_intersects(tmp, fr->addr)) {
886 continue;
887 }
888 tmp = addrrange_intersection(tmp, fr->addr);
889 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
890 int128_get64(tmp.start),
891 int128_get64(tmp.size));
892 }
893}
894
b8af1afb 895static void address_space_update_topology_pass(AddressSpace *as,
a9a0c06d
PB
896 const FlatView *old_view,
897 const FlatView *new_view,
b8af1afb 898 bool adding)
093bc2cd 899{
093bc2cd
AK
900 unsigned iold, inew;
901 FlatRange *frold, *frnew;
093bc2cd
AK
902
903 /* Generate a symmetric difference of the old and new memory maps.
904 * Kill ranges in the old map, and instantiate ranges in the new map.
905 */
906 iold = inew = 0;
a9a0c06d
PB
907 while (iold < old_view->nr || inew < new_view->nr) {
908 if (iold < old_view->nr) {
909 frold = &old_view->ranges[iold];
093bc2cd
AK
910 } else {
911 frold = NULL;
912 }
a9a0c06d
PB
913 if (inew < new_view->nr) {
914 frnew = &new_view->ranges[inew];
093bc2cd
AK
915 } else {
916 frnew = NULL;
917 }
918
919 if (frold
920 && (!frnew
08dafab4
AK
921 || int128_lt(frold->addr.start, frnew->addr.start)
922 || (int128_eq(frold->addr.start, frnew->addr.start)
093bc2cd 923 && !flatrange_equal(frold, frnew)))) {
41a6e477 924 /* In old but not in new, or in both but attributes changed. */
093bc2cd 925
b8af1afb 926 if (!adding) {
3ac7d43a 927 flat_range_coalesced_io_del(frold, as);
72e22d2f 928 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
b8af1afb
AK
929 }
930
093bc2cd
AK
931 ++iold;
932 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
41a6e477 933 /* In both and unchanged (except logging may have changed) */
093bc2cd 934
4f826024 935 if (adding) {
50c1e149 936 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
b2dfd71c
PB
937 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
938 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
939 frold->dirty_log_mask,
940 frnew->dirty_log_mask);
941 }
942 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
943 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
944 frold->dirty_log_mask,
945 frnew->dirty_log_mask);
b8af1afb 946 }
5a583347
AK
947 }
948
093bc2cd
AK
949 ++iold;
950 ++inew;
093bc2cd
AK
951 } else {
952 /* In new */
953
b8af1afb 954 if (adding) {
72e22d2f 955 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
3ac7d43a 956 flat_range_coalesced_io_add(frnew, as);
b8af1afb
AK
957 }
958
093bc2cd
AK
959 ++inew;
960 }
961 }
b8af1afb
AK
962}
963
967dc9b1
AK
964static void flatviews_init(void)
965{
092aa2fc
AK
966 static FlatView *empty_view;
967
967dc9b1
AK
968 if (flat_views) {
969 return;
970 }
971
972 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
973 (GDestroyNotify) flatview_unref);
092aa2fc
AK
974 if (!empty_view) {
975 empty_view = generate_memory_topology(NULL);
976 /* We keep it alive forever in the global variable. */
977 flatview_ref(empty_view);
978 } else {
979 g_hash_table_replace(flat_views, NULL, empty_view);
980 flatview_ref(empty_view);
981 }
967dc9b1
AK
982}
983
984static void flatviews_reset(void)
985{
986 AddressSpace *as;
987
988 if (flat_views) {
989 g_hash_table_unref(flat_views);
990 flat_views = NULL;
991 }
992 flatviews_init();
993
994 /* Render unique FVs */
995 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
996 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
997
998 if (g_hash_table_lookup(flat_views, physmr)) {
999 continue;
1000 }
1001
1002 generate_memory_topology(physmr);
1003 }
1004}
1005
1006static void address_space_set_flatview(AddressSpace *as)
b8af1afb 1007{
67ace39b 1008 FlatView *old_view = address_space_to_flatview(as);
967dc9b1
AK
1009 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1010 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1011
1012 assert(new_view);
1013
67ace39b
AK
1014 if (old_view == new_view) {
1015 return;
1016 }
1017
1018 if (old_view) {
1019 flatview_ref(old_view);
1020 }
1021
967dc9b1 1022 flatview_ref(new_view);
9a62e24f
AK
1023
1024 if (!QTAILQ_EMPTY(&as->listeners)) {
67ace39b
AK
1025 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1026
1027 if (!old_view2) {
1028 old_view2 = &tmpview;
1029 }
1030 address_space_update_topology_pass(as, old_view2, new_view, false);
1031 address_space_update_topology_pass(as, old_view2, new_view, true);
9a62e24f 1032 }
b8af1afb 1033
374f2981
PB
1034 /* Writes are protected by the BQL. */
1035 atomic_rcu_set(&as->current_map, new_view);
67ace39b
AK
1036 if (old_view) {
1037 flatview_unref(old_view);
1038 }
856d7245
PB
1039
1040 /* Note that all the old MemoryRegions are still alive up to this
1041 * point. This relieves most MemoryListeners from the need to
1042 * ref/unref the MemoryRegions they get---unless they use them
1043 * outside the iothread mutex, in which case precise reference
1044 * counting is necessary.
1045 */
67ace39b
AK
1046 if (old_view) {
1047 flatview_unref(old_view);
1048 }
093bc2cd
AK
1049}
1050
202fc01b
AK
1051static void address_space_update_topology(AddressSpace *as)
1052{
1053 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1054
1055 flatviews_init();
1056 if (!g_hash_table_lookup(flat_views, physmr)) {
1057 generate_memory_topology(physmr);
1058 }
1059 address_space_set_flatview(as);
1060}
1061
4ef4db86
AK
1062void memory_region_transaction_begin(void)
1063{
bb880ded 1064 qemu_flush_coalesced_mmio_buffer();
4ef4db86
AK
1065 ++memory_region_transaction_depth;
1066}
1067
1068void memory_region_transaction_commit(void)
1069{
0d673e36
AK
1070 AddressSpace *as;
1071
4ef4db86 1072 assert(memory_region_transaction_depth);
8d04fb55
JK
1073 assert(qemu_mutex_iothread_locked());
1074
4ef4db86 1075 --memory_region_transaction_depth;
4dc56152
GA
1076 if (!memory_region_transaction_depth) {
1077 if (memory_region_update_pending) {
967dc9b1
AK
1078 flatviews_reset();
1079
4dc56152 1080 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
02e2b95f 1081
4dc56152 1082 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
967dc9b1 1083 address_space_set_flatview(as);
02218487 1084 address_space_update_ioeventfds(as);
4dc56152 1085 }
ade9c1aa 1086 memory_region_update_pending = false;
0b152095 1087 ioeventfd_update_pending = false;
4dc56152
GA
1088 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1089 } else if (ioeventfd_update_pending) {
1090 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1091 address_space_update_ioeventfds(as);
1092 }
ade9c1aa 1093 ioeventfd_update_pending = false;
4dc56152 1094 }
4dc56152 1095 }
4ef4db86
AK
1096}
1097
545e92e0
AK
1098static void memory_region_destructor_none(MemoryRegion *mr)
1099{
1100}
1101
1102static void memory_region_destructor_ram(MemoryRegion *mr)
1103{
f1060c55 1104 qemu_ram_free(mr->ram_block);
545e92e0
AK
1105}
1106
b4fefef9
PC
1107static bool memory_region_need_escape(char c)
1108{
1109 return c == '/' || c == '[' || c == '\\' || c == ']';
1110}
1111
1112static char *memory_region_escape_name(const char *name)
1113{
1114 const char *p;
1115 char *escaped, *q;
1116 uint8_t c;
1117 size_t bytes = 0;
1118
1119 for (p = name; *p; p++) {
1120 bytes += memory_region_need_escape(*p) ? 4 : 1;
1121 }
1122 if (bytes == p - name) {
1123 return g_memdup(name, bytes + 1);
1124 }
1125
1126 escaped = g_malloc(bytes + 1);
1127 for (p = name, q = escaped; *p; p++) {
1128 c = *p;
1129 if (unlikely(memory_region_need_escape(c))) {
1130 *q++ = '\\';
1131 *q++ = 'x';
1132 *q++ = "0123456789abcdef"[c >> 4];
1133 c = "0123456789abcdef"[c & 15];
1134 }
1135 *q++ = c;
1136 }
1137 *q = 0;
1138 return escaped;
1139}
1140
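/*
 * For example, a region named "pci[00]" is escaped to "pci\x5b00\x5d", since
 * '/', '[', ']' and '\\' would otherwise clash with QOM path syntax.
 */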
3df9d748
AK
1141static void memory_region_do_init(MemoryRegion *mr,
1142 Object *owner,
1143 const char *name,
1144 uint64_t size)
093bc2cd 1145{
08dafab4
AK
1146 mr->size = int128_make64(size);
1147 if (size == UINT64_MAX) {
1148 mr->size = int128_2_64();
1149 }
302fa283 1150 mr->name = g_strdup(name);
612263cf 1151 mr->owner = owner;
58eaa217 1152 mr->ram_block = NULL;
b4fefef9
PC
1153
1154 if (name) {
843ef73a
PC
1155 char *escaped_name = memory_region_escape_name(name);
1156 char *name_array = g_strdup_printf("%s[*]", escaped_name);
612263cf
PB
1157
1158 if (!owner) {
1159 owner = container_get(qdev_get_machine(), "/unattached");
1160 }
1161
843ef73a 1162 object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
b4fefef9 1163 object_unref(OBJECT(mr));
843ef73a
PC
1164 g_free(name_array);
1165 g_free(escaped_name);
b4fefef9
PC
1166 }
1167}
1168
3df9d748
AK
1169void memory_region_init(MemoryRegion *mr,
1170 Object *owner,
1171 const char *name,
1172 uint64_t size)
1173{
1174 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1175 memory_region_do_init(mr, owner, name, size);
1176}
1177
d7bce999
EB
1178static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1179 void *opaque, Error **errp)
409ddd01
PC
1180{
1181 MemoryRegion *mr = MEMORY_REGION(obj);
1182 uint64_t value = mr->addr;
1183
51e72bc1 1184 visit_type_uint64(v, name, &value, errp);
409ddd01
PC
1185}
1186
d7bce999
EB
1187static void memory_region_get_container(Object *obj, Visitor *v,
1188 const char *name, void *opaque,
1189 Error **errp)
409ddd01
PC
1190{
1191 MemoryRegion *mr = MEMORY_REGION(obj);
1192 gchar *path = (gchar *)"";
1193
1194 if (mr->container) {
1195 path = object_get_canonical_path(OBJECT(mr->container));
1196 }
51e72bc1 1197 visit_type_str(v, name, &path, errp);
409ddd01
PC
1198 if (mr->container) {
1199 g_free(path);
1200 }
1201}
1202
1203static Object *memory_region_resolve_container(Object *obj, void *opaque,
1204 const char *part)
1205{
1206 MemoryRegion *mr = MEMORY_REGION(obj);
1207
1208 return OBJECT(mr->container);
1209}
1210
d7bce999
EB
1211static void memory_region_get_priority(Object *obj, Visitor *v,
1212 const char *name, void *opaque,
1213 Error **errp)
d33382da
PC
1214{
1215 MemoryRegion *mr = MEMORY_REGION(obj);
1216 int32_t value = mr->priority;
1217
51e72bc1 1218 visit_type_int32(v, name, &value, errp);
d33382da
PC
1219}
1220
d7bce999
EB
1221static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1222 void *opaque, Error **errp)
52aef7bb
PC
1223{
1224 MemoryRegion *mr = MEMORY_REGION(obj);
1225 uint64_t value = memory_region_size(mr);
1226
51e72bc1 1227 visit_type_uint64(v, name, &value, errp);
52aef7bb
PC
1228}
1229
b4fefef9
PC
1230static void memory_region_initfn(Object *obj)
1231{
1232 MemoryRegion *mr = MEMORY_REGION(obj);
409ddd01 1233 ObjectProperty *op;
b4fefef9
PC
1234
1235 mr->ops = &unassigned_mem_ops;
6bba19ba 1236 mr->enabled = true;
5f9a5ea1 1237 mr->romd_mode = true;
196ea131 1238 mr->global_locking = true;
545e92e0 1239 mr->destructor = memory_region_destructor_none;
093bc2cd 1240 QTAILQ_INIT(&mr->subregions);
093bc2cd 1241 QTAILQ_INIT(&mr->coalesced);
409ddd01
PC
1242
1243 op = object_property_add(OBJECT(mr), "container",
1244 "link<" TYPE_MEMORY_REGION ">",
1245 memory_region_get_container,
1246 NULL, /* memory_region_set_container */
1247 NULL, NULL, &error_abort);
1248 op->resolve = memory_region_resolve_container;
1249
1250 object_property_add(OBJECT(mr), "addr", "uint64",
1251 memory_region_get_addr,
1252 NULL, /* memory_region_set_addr */
1253 NULL, NULL, &error_abort);
d33382da
PC
1254 object_property_add(OBJECT(mr), "priority", "uint32",
1255 memory_region_get_priority,
1256 NULL, /* memory_region_set_priority */
1257 NULL, NULL, &error_abort);
52aef7bb
PC
1258 object_property_add(OBJECT(mr), "size", "uint64",
1259 memory_region_get_size,
1260 NULL, /* memory_region_set_size, */
1261 NULL, NULL, &error_abort);
093bc2cd
AK
1262}
1263
3df9d748
AK
1264static void iommu_memory_region_initfn(Object *obj)
1265{
1266 MemoryRegion *mr = MEMORY_REGION(obj);
1267
1268 mr->is_iommu = true;
1269}
1270
b018ddf6
PB
1271static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1272 unsigned size)
1273{
1274#ifdef DEBUG_UNASSIGNED
1275 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1276#endif
4917cf44 1277 if (current_cpu != NULL) {
dbea78a4
PM
1278 bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
1279 cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
c658b94f 1280 }
68a7439a 1281 return 0;
b018ddf6
PB
1282}
1283
1284static void unassigned_mem_write(void *opaque, hwaddr addr,
1285 uint64_t val, unsigned size)
1286{
1287#ifdef DEBUG_UNASSIGNED
1288 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1289#endif
4917cf44
AF
1290 if (current_cpu != NULL) {
1291 cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
c658b94f 1292 }
b018ddf6
PB
1293}
1294
d197063f 1295static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
8372d383
PM
1296 unsigned size, bool is_write,
1297 MemTxAttrs attrs)
d197063f
PB
1298{
1299 return false;
1300}
1301
1302const MemoryRegionOps unassigned_mem_ops = {
1303 .valid.accepts = unassigned_mem_accepts,
1304 .endianness = DEVICE_NATIVE_ENDIAN,
1305};
1306
4a2e242b
AW
1307static uint64_t memory_region_ram_device_read(void *opaque,
1308 hwaddr addr, unsigned size)
1309{
1310 MemoryRegion *mr = opaque;
1311 uint64_t data = (uint64_t)~0;
1312
1313 switch (size) {
1314 case 1:
1315 data = *(uint8_t *)(mr->ram_block->host + addr);
1316 break;
1317 case 2:
1318 data = *(uint16_t *)(mr->ram_block->host + addr);
1319 break;
1320 case 4:
1321 data = *(uint32_t *)(mr->ram_block->host + addr);
1322 break;
1323 case 8:
1324 data = *(uint64_t *)(mr->ram_block->host + addr);
1325 break;
1326 }
1327
1328 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1329
1330 return data;
1331}
1332
1333static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1334 uint64_t data, unsigned size)
1335{
1336 MemoryRegion *mr = opaque;
1337
1338 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1339
1340 switch (size) {
1341 case 1:
1342 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1343 break;
1344 case 2:
1345 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1346 break;
1347 case 4:
1348 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1349 break;
1350 case 8:
1351 *(uint64_t *)(mr->ram_block->host + addr) = data;
1352 break;
1353 }
1354}
1355
1356static const MemoryRegionOps ram_device_mem_ops = {
1357 .read = memory_region_ram_device_read,
1358 .write = memory_region_ram_device_write,
c99a29e7 1359 .endianness = DEVICE_HOST_ENDIAN,
4a2e242b
AW
1360 .valid = {
1361 .min_access_size = 1,
1362 .max_access_size = 8,
1363 .unaligned = true,
1364 },
1365 .impl = {
1366 .min_access_size = 1,
1367 .max_access_size = 8,
1368 .unaligned = true,
1369 },
1370};
1371
d2702032
PB
1372bool memory_region_access_valid(MemoryRegion *mr,
1373 hwaddr addr,
1374 unsigned size,
6d7b9a6c
PM
1375 bool is_write,
1376 MemTxAttrs attrs)
093bc2cd 1377{
a014ed07
PB
1378 int access_size_min, access_size_max;
1379 int access_size, i;
897fa7cf 1380
093bc2cd
AK
1381 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1382 return false;
1383 }
1384
a014ed07 1385 if (!mr->ops->valid.accepts) {
093bc2cd
AK
1386 return true;
1387 }
1388
a014ed07
PB
1389 access_size_min = mr->ops->valid.min_access_size;
1390 if (!mr->ops->valid.min_access_size) {
1391 access_size_min = 1;
1392 }
1393
1394 access_size_max = mr->ops->valid.max_access_size;
1395 if (!mr->ops->valid.max_access_size) {
1396 access_size_max = 4;
1397 }
1398
1399 access_size = MAX(MIN(size, access_size_max), access_size_min);
1400 for (i = 0; i < size; i += access_size) {
1401 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
8372d383 1402 is_write, attrs)) {
a014ed07
PB
1403 return false;
1404 }
093bc2cd 1405 }
a014ed07 1406
093bc2cd
AK
1407 return true;
1408}
1409
cc05c43a
PM
1410static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1411 hwaddr addr,
1412 uint64_t *pval,
1413 unsigned size,
1414 MemTxAttrs attrs)
093bc2cd 1415{
cc05c43a 1416 *pval = 0;
093bc2cd 1417
ce5d2f33 1418 if (mr->ops->read) {
cc05c43a
PM
1419 return access_with_adjusted_size(addr, pval, size,
1420 mr->ops->impl.min_access_size,
1421 mr->ops->impl.max_access_size,
1422 memory_region_read_accessor,
1423 mr, attrs);
62a0db94 1424 } else {
cc05c43a
PM
1425 return access_with_adjusted_size(addr, pval, size,
1426 mr->ops->impl.min_access_size,
1427 mr->ops->impl.max_access_size,
1428 memory_region_read_with_attrs_accessor,
1429 mr, attrs);
74901c3b 1430 }
093bc2cd
AK
1431}
1432
3b643495
PM
1433MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1434 hwaddr addr,
1435 uint64_t *pval,
1436 unsigned size,
1437 MemTxAttrs attrs)
a621f38d 1438{
cc05c43a
PM
1439 MemTxResult r;
1440
6d7b9a6c 1441 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
791af8c8 1442 *pval = unassigned_mem_read(mr, addr, size);
cc05c43a 1443 return MEMTX_DECODE_ERROR;
791af8c8 1444 }
a621f38d 1445
cc05c43a 1446 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
791af8c8 1447 adjust_endianness(mr, pval, size);
cc05c43a 1448 return r;
a621f38d 1449}
093bc2cd 1450
8c56c1a5
PF
1451/* Return true if an eventfd was signalled */
1452static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1453 hwaddr addr,
1454 uint64_t data,
1455 unsigned size,
1456 MemTxAttrs attrs)
1457{
1458 MemoryRegionIoeventfd ioeventfd = {
1459 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1460 .data = data,
1461 };
1462 unsigned i;
1463
1464 for (i = 0; i < mr->ioeventfd_nb; i++) {
1465 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1466 ioeventfd.e = mr->ioeventfds[i].e;
1467
73bb753d 1468 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
8c56c1a5
PF
1469 event_notifier_set(ioeventfd.e);
1470 return true;
1471 }
1472 }
1473
1474 return false;
1475}
1476
3b643495
PM
1477MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1478 hwaddr addr,
1479 uint64_t data,
1480 unsigned size,
1481 MemTxAttrs attrs)
a621f38d 1482{
6d7b9a6c 1483 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
b018ddf6 1484 unassigned_mem_write(mr, addr, data, size);
cc05c43a 1485 return MEMTX_DECODE_ERROR;
093bc2cd
AK
1486 }
1487
a621f38d
AK
1488 adjust_endianness(mr, &data, size);
1489
8c56c1a5
PF
1490 if ((!kvm_eventfds_enabled()) &&
1491 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1492 return MEMTX_OK;
1493 }
1494
ce5d2f33 1495 if (mr->ops->write) {
cc05c43a
PM
1496 return access_with_adjusted_size(addr, &data, size,
1497 mr->ops->impl.min_access_size,
1498 mr->ops->impl.max_access_size,
1499 memory_region_write_accessor, mr,
1500 attrs);
62a0db94 1501 } else {
cc05c43a
PM
1502 return
1503 access_with_adjusted_size(addr, &data, size,
1504 mr->ops->impl.min_access_size,
1505 mr->ops->impl.max_access_size,
1506 memory_region_write_with_attrs_accessor,
1507 mr, attrs);
74901c3b 1508 }
093bc2cd
AK
1509}
1510
093bc2cd 1511void memory_region_init_io(MemoryRegion *mr,
2c9b15ca 1512 Object *owner,
093bc2cd
AK
1513 const MemoryRegionOps *ops,
1514 void *opaque,
1515 const char *name,
1516 uint64_t size)
1517{
2c9b15ca 1518 memory_region_init(mr, owner, name, size);
6d6d2abf 1519 mr->ops = ops ? ops : &unassigned_mem_ops;
093bc2cd 1520 mr->opaque = opaque;
14a3c10a 1521 mr->terminates = true;
093bc2cd
AK
1522}
1523
1cfe48c1
PM
1524void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1525 Object *owner,
1526 const char *name,
1527 uint64_t size,
1528 Error **errp)
06329cce
MA
1529{
1530 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1531}
1532
1533void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1534 Object *owner,
1535 const char *name,
1536 uint64_t size,
1537 bool share,
1538 Error **errp)
093bc2cd 1539{
1cd3d492 1540 Error *err = NULL;
2c9b15ca 1541 memory_region_init(mr, owner, name, size);
8ea9252a 1542 mr->ram = true;
14a3c10a 1543 mr->terminates = true;
545e92e0 1544 mr->destructor = memory_region_destructor_ram;
1cd3d492 1545 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
677e7805 1546 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1cd3d492
IM
1547 if (err) {
1548 mr->size = int128_zero();
1549 object_unparent(OBJECT(mr));
1550 error_propagate(errp, err);
1551 }
0b183fc8
PB
1552}
1553
60786ef3
MT
1554void memory_region_init_resizeable_ram(MemoryRegion *mr,
1555 Object *owner,
1556 const char *name,
1557 uint64_t size,
1558 uint64_t max_size,
1559 void (*resized)(const char*,
1560 uint64_t length,
1561 void *host),
1562 Error **errp)
1563{
1cd3d492 1564 Error *err = NULL;
60786ef3
MT
1565 memory_region_init(mr, owner, name, size);
1566 mr->ram = true;
1567 mr->terminates = true;
1568 mr->destructor = memory_region_destructor_ram;
8e41fb63 1569 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1cd3d492 1570 mr, &err);
677e7805 1571 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1cd3d492
IM
1572 if (err) {
1573 mr->size = int128_zero();
1574 object_unparent(OBJECT(mr));
1575 error_propagate(errp, err);
1576 }
60786ef3
MT
1577}
1578
d5dbde46 1579#ifdef CONFIG_POSIX
0b183fc8
PB
1580void memory_region_init_ram_from_file(MemoryRegion *mr,
1581 struct Object *owner,
1582 const char *name,
1583 uint64_t size,
98376843 1584 uint64_t align,
cbfc0171 1585 uint32_t ram_flags,
7f56e740
PB
1586 const char *path,
1587 Error **errp)
0b183fc8 1588{
1cd3d492 1589 Error *err = NULL;
0b183fc8
PB
1590 memory_region_init(mr, owner, name, size);
1591 mr->ram = true;
1592 mr->terminates = true;
1593 mr->destructor = memory_region_destructor_ram;
98376843 1594 mr->align = align;
1cd3d492 1595 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
677e7805 1596 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1cd3d492
IM
1597 if (err) {
1598 mr->size = int128_zero();
1599 object_unparent(OBJECT(mr));
1600 error_propagate(errp, err);
1601 }
093bc2cd 1602}
fea617c5
MAL
1603
1604void memory_region_init_ram_from_fd(MemoryRegion *mr,
1605 struct Object *owner,
1606 const char *name,
1607 uint64_t size,
1608 bool share,
1609 int fd,
1610 Error **errp)
1611{
1cd3d492 1612 Error *err = NULL;
fea617c5
MAL
1613 memory_region_init(mr, owner, name, size);
1614 mr->ram = true;
1615 mr->terminates = true;
1616 mr->destructor = memory_region_destructor_ram;
cbfc0171
JH
1617 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1618 share ? RAM_SHARED : 0,
1cd3d492 1619 fd, &err);
fea617c5 1620 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1cd3d492
IM
1621 if (err) {
1622 mr->size = int128_zero();
1623 object_unparent(OBJECT(mr));
1624 error_propagate(errp, err);
1625 }
fea617c5 1626}
0b183fc8 1627#endif
093bc2cd
AK
1628
1629void memory_region_init_ram_ptr(MemoryRegion *mr,
2c9b15ca 1630 Object *owner,
093bc2cd
AK
1631 const char *name,
1632 uint64_t size,
1633 void *ptr)
1634{
2c9b15ca 1635 memory_region_init(mr, owner, name, size);
8ea9252a 1636 mr->ram = true;
14a3c10a 1637 mr->terminates = true;
fc3e7665 1638 mr->destructor = memory_region_destructor_ram;
677e7805 1639 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
ef701d7b
HT
1640
1641 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1642 assert(ptr != NULL);
8e41fb63 1643 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
093bc2cd
AK
1644}
1645
21e00fa5
AW
1646void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1647 Object *owner,
1648 const char *name,
1649 uint64_t size,
1650 void *ptr)
e4dc3f59 1651{
21e00fa5
AW
1652 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1653 mr->ram_device = true;
4a2e242b
AW
1654 mr->ops = &ram_device_mem_ops;
1655 mr->opaque = mr;
e4dc3f59
ND
1656}
1657
093bc2cd 1658void memory_region_init_alias(MemoryRegion *mr,
2c9b15ca 1659 Object *owner,
093bc2cd
AK
1660 const char *name,
1661 MemoryRegion *orig,
a8170e5e 1662 hwaddr offset,
093bc2cd
AK
1663 uint64_t size)
1664{
2c9b15ca 1665 memory_region_init(mr, owner, name, size);
093bc2cd
AK
1666 mr->alias = orig;
1667 mr->alias_offset = offset;
1668}
1669
b59821a9
PM
1670void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1671 struct Object *owner,
1672 const char *name,
1673 uint64_t size,
1674 Error **errp)
a1777f7f 1675{
1cd3d492 1676 Error *err = NULL;
a1777f7f
PM
1677 memory_region_init(mr, owner, name, size);
1678 mr->ram = true;
1679 mr->readonly = true;
1680 mr->terminates = true;
1681 mr->destructor = memory_region_destructor_ram;
1cd3d492 1682 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
a1777f7f 1683 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1cd3d492
IM
1684 if (err) {
1685 mr->size = int128_zero();
1686 object_unparent(OBJECT(mr));
1687 error_propagate(errp, err);
1688 }
a1777f7f
PM
1689}
1690
b59821a9
PM
1691void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1692 Object *owner,
1693 const MemoryRegionOps *ops,
1694 void *opaque,
1695 const char *name,
1696 uint64_t size,
1697 Error **errp)
d0a9b5bc 1698{
1cd3d492 1699 Error *err = NULL;
39e0b03d 1700 assert(ops);
2c9b15ca 1701 memory_region_init(mr, owner, name, size);
7bc2b9cd 1702 mr->ops = ops;
75f5941c 1703 mr->opaque = opaque;
d0a9b5bc 1704 mr->terminates = true;
75c578dc 1705 mr->rom_device = true;
58268c8d 1706 mr->destructor = memory_region_destructor_ram;
1cd3d492
IM
1707 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1708 if (err) {
1709 mr->size = int128_zero();
1710 object_unparent(OBJECT(mr));
1711 error_propagate(errp, err);
1712 }
d0a9b5bc
AK
1713}
1714
1221a474
AK
1715void memory_region_init_iommu(void *_iommu_mr,
1716 size_t instance_size,
1717 const char *mrtypename,
2c9b15ca 1718 Object *owner,
30951157
AK
1719 const char *name,
1720 uint64_t size)
1721{
1221a474 1722 struct IOMMUMemoryRegion *iommu_mr;
3df9d748
AK
1723 struct MemoryRegion *mr;
1724
1221a474
AK
1725 object_initialize(_iommu_mr, instance_size, mrtypename);
1726 mr = MEMORY_REGION(_iommu_mr);
3df9d748
AK
1727 memory_region_do_init(mr, owner, name, size);
1728 iommu_mr = IOMMU_MEMORY_REGION(mr);
30951157 1729 mr->terminates = true; /* then re-forwards */
3df9d748
AK
1730 QLIST_INIT(&iommu_mr->iommu_notify);
1731 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
30951157
AK
1732}
1733
b4fefef9 1734static void memory_region_finalize(Object *obj)
093bc2cd 1735{
b4fefef9
PC
1736 MemoryRegion *mr = MEMORY_REGION(obj);
1737
2e2b8eb7
PB
1738 assert(!mr->container);
1739
    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
1746 mr->enabled = false;
1747 memory_region_transaction_begin();
1748 while (!QTAILQ_EMPTY(&mr->subregions)) {
1749 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1750 memory_region_del_subregion(mr, subregion);
1751 }
1752 memory_region_transaction_commit();
1753
545e92e0 1754 mr->destructor(mr);
093bc2cd 1755 memory_region_clear_coalescing(mr);
302fa283 1756 g_free((char *)mr->name);
7267c094 1757 g_free(mr->ioeventfds);
093bc2cd
AK
1758}
1759
803c0816
PB
1760Object *memory_region_owner(MemoryRegion *mr)
1761{
22a893e4
PB
1762 Object *obj = OBJECT(mr);
1763 return obj->parent;
803c0816
PB
1764}
1765
46637be2
PB
1766void memory_region_ref(MemoryRegion *mr)
1767{
22a893e4
PB
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't unparent the memory region, ref-ing the owner
     * will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because doing so would slow down DMA
     * noticeably.
     */
612263cf
PB
1778 if (mr && mr->owner) {
1779 object_ref(mr->owner);
46637be2
PB
1780 }
1781}
1782
1783void memory_region_unref(MemoryRegion *mr)
1784{
612263cf
PB
1785 if (mr && mr->owner) {
1786 object_unref(mr->owner);
46637be2
PB
1787 }
1788}
1789
093bc2cd
AK
1790uint64_t memory_region_size(MemoryRegion *mr)
1791{
08dafab4
AK
1792 if (int128_eq(mr->size, int128_2_64())) {
1793 return UINT64_MAX;
1794 }
1795 return int128_get64(mr->size);
093bc2cd
AK
1796}
1797
5d546d4b 1798const char *memory_region_name(const MemoryRegion *mr)
8991c79b 1799{
d1dd32af
PC
1800 if (!mr->name) {
1801 ((MemoryRegion *)mr)->name =
1802 object_get_canonical_path_component(OBJECT(mr));
1803 }
302fa283 1804 return mr->name;
8991c79b
AK
1805}
1806
21e00fa5 1807bool memory_region_is_ram_device(MemoryRegion *mr)
e4dc3f59 1808{
21e00fa5 1809 return mr->ram_device;
e4dc3f59
ND
1810}
1811
2d1a35be 1812uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
55043ba3 1813{
6f6a5ef3 1814 uint8_t mask = mr->dirty_log_mask;
adaad61c 1815 if (global_dirty_log && mr->ram_block) {
6f6a5ef3
PB
1816 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1817 }
1818 return mask;
55043ba3
AK
1819}
1820
2d1a35be
PB
1821bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1822{
1823 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1824}
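
/*
 * Illustrative usage sketch (hypothetical helper; only the two accessors
 * above and the DIRTY_MEMORY_VGA client come from this file): how a display
 * device might decide whether it can rely on dirty tracking for its VRAM
 * region or has to repaint everything.
 */
static bool example_vram_needs_full_redraw(MemoryRegion *vram)
{
    /* A zero mask means no client is tracking writes to this region. */
    if (!memory_region_get_dirty_log_mask(vram)) {
        return true;
    }
    /* With VGA dirty logging active, partial redraws are possible. */
    return !memory_region_is_logging(vram, DIRTY_MEMORY_VGA);
}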
1825
3df9d748 1826static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
5bf3d319
PX
1827{
1828 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1829 IOMMUNotifier *iommu_notifier;
1221a474 1830 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
5bf3d319 1831
3df9d748 1832 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
5bf3d319
PX
1833 flags |= iommu_notifier->notifier_flags;
1834 }
1835
1221a474
AK
1836 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1837 imrc->notify_flag_changed(iommu_mr,
1838 iommu_mr->iommu_notify_flags,
1839 flags);
5bf3d319
PX
1840 }
1841
3df9d748 1842 iommu_mr->iommu_notify_flags = flags;
5bf3d319
PX
1843}
1844
cdb30812
PX
1845void memory_region_register_iommu_notifier(MemoryRegion *mr,
1846 IOMMUNotifier *n)
06866575 1847{
3df9d748
AK
1848 IOMMUMemoryRegion *iommu_mr;
1849
efcd38c5
JW
1850 if (mr->alias) {
1851 memory_region_register_iommu_notifier(mr->alias, n);
1852 return;
1853 }
1854
cdb30812 1855 /* We need to register for at least one notifier flag */
3df9d748 1856 iommu_mr = IOMMU_MEMORY_REGION(mr);
cdb30812 1857 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
698feb5e 1858 assert(n->start <= n->end);
cb1efcf4
PM
1859 assert(n->iommu_idx >= 0 &&
1860 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1861
3df9d748
AK
1862 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1863 memory_region_update_iommu_notify_flags(iommu_mr);
06866575
DG
1864}
1865
3df9d748 1866uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
a788f227 1867{
1221a474
AK
1868 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1869
1870 if (imrc->get_min_page_size) {
1871 return imrc->get_min_page_size(iommu_mr);
f682e9c2
AK
1872 }
1873 return TARGET_PAGE_SIZE;
1874}
1875
3df9d748 1876void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
f682e9c2 1877{
3df9d748 1878 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1221a474 1879 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
f682e9c2 1880 hwaddr addr, granularity;
a788f227
DG
1881 IOMMUTLBEntry iotlb;
1882
faa362e3 1883 /* If the IOMMU has its own replay callback, override */
1221a474
AK
1884 if (imrc->replay) {
1885 imrc->replay(iommu_mr, n);
faa362e3
PX
1886 return;
1887 }
1888
3df9d748 1889 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
f682e9c2 1890
a788f227 1891 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
2c91bcf2 1892 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
a788f227
DG
1893 if (iotlb.perm != IOMMU_NONE) {
1894 n->notify(n, &iotlb);
1895 }
1896
1897 /* If (2^64 - MR size) < granularity, it is possible to get an
1898 * infinite loop here.  This check catches such a wraparound. */
1899 if ((addr + granularity) < addr) {
1900 break;
1901 }
1902 }
1903}
1904
3df9d748 1905void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
de472e4a
PX
1906{
1907 IOMMUNotifier *notifier;
1908
3df9d748
AK
1909 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1910 memory_region_iommu_replay(iommu_mr, notifier);
de472e4a
PX
1911 }
1912}
1913
cdb30812
PX
1914void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1915 IOMMUNotifier *n)
06866575 1916{
3df9d748
AK
1917 IOMMUMemoryRegion *iommu_mr;
1918
efcd38c5
JW
1919 if (mr->alias) {
1920 memory_region_unregister_iommu_notifier(mr->alias, n);
1921 return;
1922 }
cdb30812 1923 QLIST_REMOVE(n, node);
3df9d748
AK
1924 iommu_mr = IOMMU_MEMORY_REGION(mr);
1925 memory_region_update_iommu_notify_flags(iommu_mr);
06866575
DG
1926}
1927
bd2bfa4c
PX
1928void memory_region_notify_one(IOMMUNotifier *notifier,
1929 IOMMUTLBEntry *entry)
06866575 1930{
cdb30812
PX
1931 IOMMUNotifierFlag request_flags;
1932
bd2bfa4c
PX
1933 /*
1934 * Skip the notification if it does not overlap with the
1935 * registered range.
1936 */
b021d1c0 1937 if (notifier->start > entry->iova + entry->addr_mask ||
bd2bfa4c
PX
1938 notifier->end < entry->iova) {
1939 return;
1940 }
cdb30812 1941
bd2bfa4c 1942 if (entry->perm & IOMMU_RW) {
cdb30812
PX
1943 request_flags = IOMMU_NOTIFIER_MAP;
1944 } else {
1945 request_flags = IOMMU_NOTIFIER_UNMAP;
1946 }
1947
bd2bfa4c
PX
1948 if (notifier->notifier_flags & request_flags) {
1949 notifier->notify(notifier, entry);
1950 }
1951}
1952
3df9d748 1953void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
cb1efcf4 1954 int iommu_idx,
bd2bfa4c
PX
1955 IOMMUTLBEntry entry)
1956{
1957 IOMMUNotifier *iommu_notifier;
1958
3df9d748 1959 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
bd2bfa4c 1960
3df9d748 1961 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
cb1efcf4
PM
1962 if (iommu_notifier->iommu_idx == iommu_idx) {
1963 memory_region_notify_one(iommu_notifier, &entry);
1964 }
cdb30812 1965 }
06866575
DG
1966}
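
/*
 * Illustrative usage sketch (hypothetical callback and notifier object;
 * the fields filled in are the ones asserted by
 * memory_region_register_iommu_notifier() above): watching an entire IOMMU
 * region for MAP/UNMAP events and replaying existing mappings into the
 * freshly registered notifier.
 */
static void example_iommu_mapping_changed(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* A real consumer would update its shadow mappings here. */
}

static IOMMUNotifier example_notifier;

static void example_watch_whole_iommu(IOMMUMemoryRegion *iommu_mr)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);

    example_notifier.notify = example_iommu_mapping_changed;
    example_notifier.notifier_flags = IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP;
    example_notifier.start = 0;
    example_notifier.end = memory_region_size(mr) - 1;
    /* Index 0 is always valid: memory_region_iommu_num_indexes() >= 1. */
    example_notifier.iommu_idx = 0;

    memory_region_register_iommu_notifier(mr, &example_notifier);
    /* Push the currently established mappings into the new notifier. */
    memory_region_iommu_replay(iommu_mr, &example_notifier);
}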
1967
f1334de6
AK
1968int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1969 enum IOMMUMemoryRegionAttr attr,
1970 void *data)
1971{
1972 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1973
1974 if (!imrc->get_attr) {
1975 return -EINVAL;
1976 }
1977
1978 return imrc->get_attr(iommu_mr, attr, data);
1979}
1980
21f40209
PM
1981int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1982 MemTxAttrs attrs)
1983{
1984 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1985
1986 if (!imrc->attrs_to_index) {
1987 return 0;
1988 }
1989
1990 return imrc->attrs_to_index(iommu_mr, attrs);
1991}
1992
1993int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
1994{
1995 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1996
1997 if (!imrc->num_indexes) {
1998 return 1;
1999 }
2000
2001 return imrc->num_indexes(iommu_mr);
2002}
2003
093bc2cd
AK
2004void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2005{
5a583347 2006 uint8_t mask = 1 << client;
deb809ed 2007 uint8_t old_logging;
5a583347 2008
dbddac6d 2009 assert(client == DIRTY_MEMORY_VGA);
deb809ed
PB
2010 old_logging = mr->vga_logging_count;
2011 mr->vga_logging_count += log ? 1 : -1;
2012 if (!!old_logging == !!mr->vga_logging_count) {
2013 return;
2014 }
2015
59023ef4 2016 memory_region_transaction_begin();
5a583347 2017 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
22bde714 2018 memory_region_update_pending |= mr->enabled;
59023ef4 2019 memory_region_transaction_commit();
093bc2cd
AK
2020}
2021
a8170e5e
AK
2022bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
2023 hwaddr size, unsigned client)
093bc2cd 2024{
8e41fb63
FZ
2025 assert(mr->ram_block);
2026 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
2027 size, client);
093bc2cd
AK
2028}
2029
a8170e5e
AK
2030void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2031 hwaddr size)
093bc2cd 2032{
8e41fb63
FZ
2033 assert(mr->ram_block);
2034 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2035 size,
58d2707e 2036 memory_region_get_dirty_log_mask(mr));
093bc2cd
AK
2037}
2038
0fe1eca7 2039static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
093bc2cd 2040{
0a752eee 2041 MemoryListener *listener;
0d673e36 2042 AddressSpace *as;
0a752eee 2043 FlatView *view;
5a583347
AK
2044 FlatRange *fr;
2045
0a752eee
PB
2046 /* If the same address space has multiple log_sync listeners, we
2047 * visit that address space's FlatView multiple times. But because
2048 * log_sync listeners are rare, it's still cheaper than walking each
2049 * address space once.
2050 */
2051 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2052 if (!listener->log_sync) {
2053 continue;
2054 }
2055 as = listener->address_space;
2056 view = address_space_get_flatview(as);
99e86347 2057 FOR_EACH_FLAT_RANGE(fr, view) {
3ebb1817 2058 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
16620684 2059 MemoryRegionSection mrs = section_from_flat_range(fr, view);
0a752eee 2060 listener->log_sync(listener, &mrs);
0d673e36 2061 }
5a583347 2062 }
856d7245 2063 flatview_unref(view);
5a583347 2064 }
093bc2cd
AK
2065}
2066
0fe1eca7
PB
2067DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2068 hwaddr addr,
2069 hwaddr size,
2070 unsigned client)
2071{
2072 assert(mr->ram_block);
2073 memory_region_sync_dirty_bitmap(mr);
2074 return cpu_physical_memory_snapshot_and_clear_dirty(
2075 memory_region_get_ram_addr(mr) + addr, size, client);
2076}
2077
2078bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2079 hwaddr addr, hwaddr size)
2080{
2081 assert(mr->ram_block);
2082 return cpu_physical_memory_snapshot_get_dirty(snap,
2083 memory_region_get_ram_addr(mr) + addr, size);
2084}
2085
093bc2cd
AK
2086void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2087{
fb1cd6f9 2088 if (mr->readonly != readonly) {
59023ef4 2089 memory_region_transaction_begin();
fb1cd6f9 2090 mr->readonly = readonly;
22bde714 2091 memory_region_update_pending |= mr->enabled;
59023ef4 2092 memory_region_transaction_commit();
fb1cd6f9 2093 }
093bc2cd
AK
2094}
2095
c26763f8
MAL
2096void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2097{
2098 if (mr->nonvolatile != nonvolatile) {
2099 memory_region_transaction_begin();
2100 mr->nonvolatile = nonvolatile;
2101 memory_region_update_pending |= mr->enabled;
2102 memory_region_transaction_commit();
2103 }
2104}
2105
5f9a5ea1 2106void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
d0a9b5bc 2107{
5f9a5ea1 2108 if (mr->romd_mode != romd_mode) {
59023ef4 2109 memory_region_transaction_begin();
5f9a5ea1 2110 mr->romd_mode = romd_mode;
22bde714 2111 memory_region_update_pending |= mr->enabled;
59023ef4 2112 memory_region_transaction_commit();
d0a9b5bc
AK
2113 }
2114}
2115
a8170e5e
AK
2116void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2117 hwaddr size, unsigned client)
093bc2cd 2118{
8e41fb63
FZ
2119 assert(mr->ram_block);
2120 cpu_physical_memory_test_and_clear_dirty(
2121 memory_region_get_ram_addr(mr) + addr, size, client);
093bc2cd
AK
2122}
2123
a35ba7be
PB
2124int memory_region_get_fd(MemoryRegion *mr)
2125{
4ff87573
PB
2126 int fd;
2127
2128 rcu_read_lock();
2129 while (mr->alias) {
2130 mr = mr->alias;
a35ba7be 2131 }
4ff87573
PB
2132 fd = mr->ram_block->fd;
2133 rcu_read_unlock();
a35ba7be 2134
4ff87573
PB
2135 return fd;
2136}
a35ba7be 2137
093bc2cd
AK
2138void *memory_region_get_ram_ptr(MemoryRegion *mr)
2139{
49b24afc
PB
2140 void *ptr;
2141 uint64_t offset = 0;
093bc2cd 2142
49b24afc
PB
2143 rcu_read_lock();
2144 while (mr->alias) {
2145 offset += mr->alias_offset;
2146 mr = mr->alias;
2147 }
8e41fb63 2148 assert(mr->ram_block);
0878d0e1 2149 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
49b24afc 2150 rcu_read_unlock();
093bc2cd 2151
0878d0e1 2152 return ptr;
093bc2cd
AK
2153}
2154
07bdaa41
PB
2155MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2156{
2157 RAMBlock *block;
2158
2159 block = qemu_ram_block_from_host(ptr, false, offset);
2160 if (!block) {
2161 return NULL;
2162 }
2163
2164 return block->mr;
2165}
2166
7ebb2745
FZ
2167ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2168{
2169 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2170}
2171
37d7c084
PB
2172void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2173{
8e41fb63 2174 assert(mr->ram_block);
37d7c084 2175
fa53a0e5 2176 qemu_ram_resize(mr->ram_block, newsize, errp);
37d7c084
PB
2177}
2178
0d673e36 2179static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
093bc2cd 2180{
99e86347 2181 FlatView *view;
093bc2cd 2182 FlatRange *fr;
093bc2cd 2183
856d7245 2184 view = address_space_get_flatview(as);
99e86347 2185 FOR_EACH_FLAT_RANGE(fr, view) {
093bc2cd 2186 if (fr->mr == mr) {
909bf763
PB
2187 flat_range_coalesced_io_del(fr, as);
2188 flat_range_coalesced_io_add(fr, as);
093bc2cd
AK
2189 }
2190 }
856d7245 2191 flatview_unref(view);
093bc2cd
AK
2192}
2193
0d673e36
AK
2194static void memory_region_update_coalesced_range(MemoryRegion *mr)
2195{
2196 AddressSpace *as;
2197
2198 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2199 memory_region_update_coalesced_range_as(mr, as);
2200 }
2201}
2202
093bc2cd
AK
2203void memory_region_set_coalescing(MemoryRegion *mr)
2204{
2205 memory_region_clear_coalescing(mr);
08dafab4 2206 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
093bc2cd
AK
2207}
2208
2209void memory_region_add_coalescing(MemoryRegion *mr,
a8170e5e 2210 hwaddr offset,
093bc2cd
AK
2211 uint64_t size)
2212{
7267c094 2213 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
093bc2cd 2214
08dafab4 2215 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
093bc2cd
AK
2216 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2217 memory_region_update_coalesced_range(mr);
d410515e 2218 memory_region_set_flush_coalesced(mr);
093bc2cd
AK
2219}
2220
2221void memory_region_clear_coalescing(MemoryRegion *mr)
2222{
2223 CoalescedMemoryRange *cmr;
ab5b3db5 2224 bool updated = false;
093bc2cd 2225
d410515e
JK
2226 qemu_flush_coalesced_mmio_buffer();
2227 mr->flush_coalesced_mmio = false;
2228
093bc2cd
AK
2229 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2230 cmr = QTAILQ_FIRST(&mr->coalesced);
2231 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
7267c094 2232 g_free(cmr);
ab5b3db5
FZ
2233 updated = true;
2234 }
2235
2236 if (updated) {
2237 memory_region_update_coalesced_range(mr);
093bc2cd 2238 }
093bc2cd
AK
2239}
2240
d410515e
JK
2241void memory_region_set_flush_coalesced(MemoryRegion *mr)
2242{
2243 mr->flush_coalesced_mmio = true;
2244}
2245
2246void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2247{
2248 qemu_flush_coalesced_mmio_buffer();
2249 if (QTAILQ_EMPTY(&mr->coalesced)) {
2250 mr->flush_coalesced_mmio = false;
2251 }
2252}
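
/*
 * Illustrative usage sketch (hypothetical helpers and offsets): coalescing
 * guest writes to a write-mostly MMIO window, and dropping the coalesced
 * state again on reset.  As seen above, memory_region_add_coalescing() also
 * turns on flush-coalesced behaviour for the region.
 */
static void example_enable_coalescing(MemoryRegion *mmio)
{
    /* Coalesce guest writes to the first 4 KiB of the region. */
    memory_region_add_coalescing(mmio, 0, 0x1000);
}

static void example_reset_coalescing(MemoryRegion *mmio)
{
    /* Flush any pending coalesced writes and stop coalescing. */
    memory_region_clear_coalescing(mmio);
}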
2253
196ea131
JK
2254void memory_region_clear_global_locking(MemoryRegion *mr)
2255{
2256 mr->global_locking = false;
2257}
2258
8c56c1a5
PF
2259static bool userspace_eventfd_warning;
2260
3e9d69e7 2261void memory_region_add_eventfd(MemoryRegion *mr,
a8170e5e 2262 hwaddr addr,
3e9d69e7
AK
2263 unsigned size,
2264 bool match_data,
2265 uint64_t data,
753d5e14 2266 EventNotifier *e)
3e9d69e7
AK
2267{
2268 MemoryRegionIoeventfd mrfd = {
08dafab4
AK
2269 .addr.start = int128_make64(addr),
2270 .addr.size = int128_make64(size),
3e9d69e7
AK
2271 .match_data = match_data,
2272 .data = data,
753d5e14 2273 .e = e,
3e9d69e7
AK
2274 };
2275 unsigned i;
2276
8c56c1a5
PF
2277 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2278 userspace_eventfd_warning))) {
2279 userspace_eventfd_warning = true;
2280 error_report("Using eventfd without MMIO binding in KVM. "
2281 "Suboptimal performance expected");
2282 }
2283
b8aecea2
JW
2284 if (size) {
2285 adjust_endianness(mr, &mrfd.data, size);
2286 }
59023ef4 2287 memory_region_transaction_begin();
3e9d69e7 2288 for (i = 0; i < mr->ioeventfd_nb; ++i) {
73bb753d 2289 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
3e9d69e7
AK
2290 break;
2291 }
2292 }
2293 ++mr->ioeventfd_nb;
7267c094 2294 mr->ioeventfds = g_realloc(mr->ioeventfds,
3e9d69e7
AK
2295 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2296 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2297 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2298 mr->ioeventfds[i] = mrfd;
4dc56152 2299 ioeventfd_update_pending |= mr->enabled;
59023ef4 2300 memory_region_transaction_commit();
3e9d69e7
AK
2301}
2302
2303void memory_region_del_eventfd(MemoryRegion *mr,
a8170e5e 2304 hwaddr addr,
3e9d69e7
AK
2305 unsigned size,
2306 bool match_data,
2307 uint64_t data,
753d5e14 2308 EventNotifier *e)
3e9d69e7
AK
2309{
2310 MemoryRegionIoeventfd mrfd = {
08dafab4
AK
2311 .addr.start = int128_make64(addr),
2312 .addr.size = int128_make64(size),
3e9d69e7
AK
2313 .match_data = match_data,
2314 .data = data,
753d5e14 2315 .e = e,
3e9d69e7
AK
2316 };
2317 unsigned i;
2318
b8aecea2
JW
2319 if (size) {
2320 adjust_endianness(mr, &mrfd.data, size);
2321 }
59023ef4 2322 memory_region_transaction_begin();
3e9d69e7 2323 for (i = 0; i < mr->ioeventfd_nb; ++i) {
73bb753d 2324 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
3e9d69e7
AK
2325 break;
2326 }
2327 }
2328 assert(i != mr->ioeventfd_nb);
2329 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2330 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2331 --mr->ioeventfd_nb;
7267c094 2332 mr->ioeventfds = g_realloc(mr->ioeventfds,
3e9d69e7 2333 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
4dc56152 2334 ioeventfd_update_pending |= mr->enabled;
59023ef4 2335 memory_region_transaction_commit();
3e9d69e7
AK
2336}
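
/*
 * Illustrative usage sketch (hypothetical doorbell offset, size and data
 * value; the EventNotifier is assumed to be initialized by the caller):
 * binding a 4-byte guest write of the value 1 at offset 0x40 to an
 * EventNotifier, so the write kicks the notifier instead of taking a full
 * MMIO exit.
 */
static void example_wire_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    memory_region_add_eventfd(mmio, 0x40, 4, true, 1, e);
}

static void example_unwire_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    /* Must match the registration exactly, or the assert above fires. */
    memory_region_del_eventfd(mmio, 0x40, 4, true, 1, e);
}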
2337
feca4ac1 2338static void memory_region_update_container_subregions(MemoryRegion *subregion)
093bc2cd 2339{
feca4ac1 2340 MemoryRegion *mr = subregion->container;
093bc2cd
AK
2341 MemoryRegion *other;
2342
59023ef4
JK
2343 memory_region_transaction_begin();
2344
dfde4e6e 2345 memory_region_ref(subregion);
093bc2cd
AK
2346 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2347 if (subregion->priority >= other->priority) {
2348 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2349 goto done;
2350 }
2351 }
2352 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2353done:
22bde714 2354 memory_region_update_pending |= mr->enabled && subregion->enabled;
59023ef4 2355 memory_region_transaction_commit();
093bc2cd
AK
2356}
2357
0598701a
PC
2358static void memory_region_add_subregion_common(MemoryRegion *mr,
2359 hwaddr offset,
2360 MemoryRegion *subregion)
2361{
feca4ac1
PB
2362 assert(!subregion->container);
2363 subregion->container = mr;
0598701a 2364 subregion->addr = offset;
feca4ac1 2365 memory_region_update_container_subregions(subregion);
0598701a 2366}
093bc2cd
AK
2367
2368void memory_region_add_subregion(MemoryRegion *mr,
a8170e5e 2369 hwaddr offset,
093bc2cd
AK
2370 MemoryRegion *subregion)
2371{
093bc2cd
AK
2372 subregion->priority = 0;
2373 memory_region_add_subregion_common(mr, offset, subregion);
2374}
2375
2376void memory_region_add_subregion_overlap(MemoryRegion *mr,
a8170e5e 2377 hwaddr offset,
093bc2cd 2378 MemoryRegion *subregion,
a1ff8ae0 2379 int priority)
093bc2cd 2380{
093bc2cd
AK
2381 subregion->priority = priority;
2382 memory_region_add_subregion_common(mr, offset, subregion);
2383}
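
/*
 * Illustrative usage sketch (the "system", "ram" and "mmio" regions are
 * hypothetical and assumed to be initialized by the caller): composing a
 * hierarchy where an MMIO window shadows RAM by virtue of its higher
 * priority wherever the two overlap.
 */
static void example_build_layout(MemoryRegion *system,
                                 MemoryRegion *ram,
                                 MemoryRegion *mmio)
{
    /* Default priority 0: back the whole container with RAM. */
    memory_region_add_subregion(system, 0, ram);
    /* Priority 1: the MMIO window wins over RAM at 0xfe000000. */
    memory_region_add_subregion_overlap(system, 0xfe000000, mmio, 1);
}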
2384
2385void memory_region_del_subregion(MemoryRegion *mr,
2386 MemoryRegion *subregion)
2387{
59023ef4 2388 memory_region_transaction_begin();
feca4ac1
PB
2389 assert(subregion->container == mr);
2390 subregion->container = NULL;
093bc2cd 2391 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
dfde4e6e 2392 memory_region_unref(subregion);
22bde714 2393 memory_region_update_pending |= mr->enabled && subregion->enabled;
59023ef4 2394 memory_region_transaction_commit();
6bba19ba
AK
2395}
2396
2397void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2398{
2399 if (enabled == mr->enabled) {
2400 return;
2401 }
59023ef4 2402 memory_region_transaction_begin();
6bba19ba 2403 mr->enabled = enabled;
22bde714 2404 memory_region_update_pending = true;
59023ef4 2405 memory_region_transaction_commit();
093bc2cd 2406}
1c0ffa58 2407
e7af4c67
MT
2408void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2409{
2410 Int128 s = int128_make64(size);
2411
2412 if (size == UINT64_MAX) {
2413 s = int128_2_64();
2414 }
2415 if (int128_eq(s, mr->size)) {
2416 return;
2417 }
2418 memory_region_transaction_begin();
2419 mr->size = s;
2420 memory_region_update_pending = true;
2421 memory_region_transaction_commit();
2422}
2423
67891b8a 2424static void memory_region_readd_subregion(MemoryRegion *mr)
2282e1af 2425{
feca4ac1 2426 MemoryRegion *container = mr->container;
2282e1af 2427
feca4ac1 2428 if (container) {
67891b8a
PC
2429 memory_region_transaction_begin();
2430 memory_region_ref(mr);
feca4ac1
PB
2431 memory_region_del_subregion(container, mr);
2432 mr->container = container;
2433 memory_region_update_container_subregions(mr);
67891b8a
PC
2434 memory_region_unref(mr);
2435 memory_region_transaction_commit();
2282e1af 2436 }
67891b8a 2437}
2282e1af 2438
67891b8a
PC
2439void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2440{
2441 if (addr != mr->addr) {
2442 mr->addr = addr;
2443 memory_region_readd_subregion(mr);
2444 }
2282e1af
AK
2445}
2446
a8170e5e 2447void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
4703359e 2448{
4703359e 2449 assert(mr->alias);
4703359e 2450
59023ef4 2451 if (offset == mr->alias_offset) {
4703359e
AK
2452 return;
2453 }
2454
59023ef4
JK
2455 memory_region_transaction_begin();
2456 mr->alias_offset = offset;
22bde714 2457 memory_region_update_pending |= mr->enabled;
59023ef4 2458 memory_region_transaction_commit();
4703359e
AK
2459}
2460
a2b257d6
IM
2461uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2462{
2463 return mr->align;
2464}
2465
e2177955
AK
2466static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2467{
2468 const AddrRange *addr = addr_;
2469 const FlatRange *fr = fr_;
2470
2471 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2472 return -1;
2473 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2474 return 1;
2475 }
2476 return 0;
2477}
2478
99e86347 2479static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
e2177955 2480{
99e86347 2481 return bsearch(&addr, view->ranges, view->nr,
e2177955
AK
2482 sizeof(FlatRange), cmp_flatrange_addr);
2483}
2484
eed2bacf
IM
2485bool memory_region_is_mapped(MemoryRegion *mr)
2486{
2487 return mr->container ? true : false;
2488}
2489
c6742b14
PB
2490/* Same as memory_region_find, but it does not add a reference to the
2491 * returned region. It must be called from an RCU critical section.
2492 */
2493static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2494 hwaddr addr, uint64_t size)
e2177955 2495{
052e87b0 2496 MemoryRegionSection ret = { .mr = NULL };
73034e9e
PB
2497 MemoryRegion *root;
2498 AddressSpace *as;
2499 AddrRange range;
99e86347 2500 FlatView *view;
73034e9e
PB
2501 FlatRange *fr;
2502
2503 addr += mr->addr;
feca4ac1
PB
2504 for (root = mr; root->container; ) {
2505 root = root->container;
73034e9e
PB
2506 addr += root->addr;
2507 }
e2177955 2508
73034e9e 2509 as = memory_region_to_address_space(root);
eed2bacf
IM
2510 if (!as) {
2511 return ret;
2512 }
73034e9e 2513 range = addrrange_make(int128_make64(addr), int128_make64(size));
99e86347 2514
16620684 2515 view = address_space_to_flatview(as);
99e86347 2516 fr = flatview_lookup(view, range);
e2177955 2517 if (!fr) {
c6742b14 2518 return ret;
e2177955
AK
2519 }
2520
99e86347 2521 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
e2177955
AK
2522 --fr;
2523 }
2524
2525 ret.mr = fr->mr;
16620684 2526 ret.fv = view;
e2177955
AK
2527 range = addrrange_intersection(range, fr->addr);
2528 ret.offset_within_region = fr->offset_in_region;
2529 ret.offset_within_region += int128_get64(int128_sub(range.start,
2530 fr->addr.start));
052e87b0 2531 ret.size = range.size;
e2177955 2532 ret.offset_within_address_space = int128_get64(range.start);
7a8499e8 2533 ret.readonly = fr->readonly;
c26763f8 2534 ret.nonvolatile = fr->nonvolatile;
c6742b14
PB
2535 return ret;
2536}
2537
2538MemoryRegionSection memory_region_find(MemoryRegion *mr,
2539 hwaddr addr, uint64_t size)
2540{
2541 MemoryRegionSection ret;
2542 rcu_read_lock();
2543 ret = memory_region_find_rcu(mr, addr, size);
2544 if (ret.mr) {
2545 memory_region_ref(ret.mr);
2546 }
2b647668 2547 rcu_read_unlock();
e2177955
AK
2548 return ret;
2549}
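
/*
 * Illustrative usage sketch (hypothetical probing helper): querying what is
 * mapped at an offset below a region.  memory_region_find() returns a
 * referenced section, so the caller must drop the reference when done.
 */
static bool example_probe_mapping(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection section = memory_region_find(root, addr, 1);
    bool mapped = section.mr != NULL;

    if (mapped) {
        /* section.offset_within_region is the offset inside section.mr. */
        memory_region_unref(section.mr);
    }
    return mapped;
}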
2550
c6742b14
PB
2551bool memory_region_present(MemoryRegion *container, hwaddr addr)
2552{
2553 MemoryRegion *mr;
2554
2555 rcu_read_lock();
2556 mr = memory_region_find_rcu(container, addr, 1).mr;
2557 rcu_read_unlock();
2558 return mr && mr != container;
2559}
2560
9c1f8f44 2561void memory_global_dirty_log_sync(void)
86e775c6 2562{
3ebb1817 2563 memory_region_sync_dirty_bitmap(NULL);
7664e80c
AK
2564}
2565
19310760
JZ
2566static VMChangeStateEntry *vmstate_change;
2567
7664e80c
AK
2568void memory_global_dirty_log_start(void)
2569{
19310760
JZ
2570 if (vmstate_change) {
2571 qemu_del_vm_change_state_handler(vmstate_change);
2572 vmstate_change = NULL;
2573 }
2574
7664e80c 2575 global_dirty_log = true;
6f6a5ef3 2576
7376e582 2577 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
6f6a5ef3
PB
2578
2579 /* Refresh DIRTY_LOG_MIGRATION bit. */
2580 memory_region_transaction_begin();
2581 memory_region_update_pending = true;
2582 memory_region_transaction_commit();
7664e80c
AK
2583}
2584
19310760 2585static void memory_global_dirty_log_do_stop(void)
7664e80c 2586{
7664e80c 2587 global_dirty_log = false;
6f6a5ef3
PB
2588
2589 /* Refresh DIRTY_LOG_MIGRATION bit. */
2590 memory_region_transaction_begin();
2591 memory_region_update_pending = true;
2592 memory_region_transaction_commit();
2593
7376e582 2594 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
7664e80c
AK
2595}
2596
19310760
JZ
2597static void memory_vm_change_state_handler(void *opaque, int running,
2598 RunState state)
2599{
2600 if (running) {
2601 memory_global_dirty_log_do_stop();
2602
2603 if (vmstate_change) {
2604 qemu_del_vm_change_state_handler(vmstate_change);
2605 vmstate_change = NULL;
2606 }
2607 }
2608}
2609
2610void memory_global_dirty_log_stop(void)
2611{
2612 if (!runstate_is_running()) {
2613 if (vmstate_change) {
2614 return;
2615 }
2616 vmstate_change = qemu_add_vm_change_state_handler(
2617 memory_vm_change_state_handler, NULL);
2618 return;
2619 }
2620
2621 memory_global_dirty_log_do_stop();
2622}
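
/*
 * Illustrative usage sketch (the bracketing pattern is an assumption, not a
 * fixed contract; only the three global dirty-log calls come from this
 * file): how a migration-like user might frame a dirty-tracking pass.
 */
static void example_dirty_tracking_pass(void)
{
    memory_global_dirty_log_start();
    /* ... let the guest run, then pull the dirty bitmaps ... */
    memory_global_dirty_log_sync();
    memory_global_dirty_log_stop();
}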
2623
7664e80c
AK
2624static void listener_add_address_space(MemoryListener *listener,
2625 AddressSpace *as)
2626{
99e86347 2627 FlatView *view;
7664e80c
AK
2628 FlatRange *fr;
2629
680a4783
PB
2630 if (listener->begin) {
2631 listener->begin(listener);
2632 }
7664e80c 2633 if (global_dirty_log) {
975aefe0
AK
2634 if (listener->log_global_start) {
2635 listener->log_global_start(listener);
2636 }
7664e80c 2637 }
975aefe0 2638
856d7245 2639 view = address_space_get_flatview(as);
99e86347 2640 FOR_EACH_FLAT_RANGE(fr, view) {
279836f8
DH
2641 MemoryRegionSection section = section_from_flat_range(fr, view);
2642
975aefe0
AK
2643 if (listener->region_add) {
2644 listener->region_add(listener, &section);
2645 }
ae990e6c
DH
2646 if (fr->dirty_log_mask && listener->log_start) {
2647 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2648 }
7664e80c 2649 }
680a4783
PB
2650 if (listener->commit) {
2651 listener->commit(listener);
2652 }
856d7245 2653 flatview_unref(view);
7664e80c
AK
2654}
2655
d25836ca
PX
2656static void listener_del_address_space(MemoryListener *listener,
2657 AddressSpace *as)
2658{
2659 FlatView *view;
2660 FlatRange *fr;
2661
2662 if (listener->begin) {
2663 listener->begin(listener);
2664 }
2665 view = address_space_get_flatview(as);
2666 FOR_EACH_FLAT_RANGE(fr, view) {
2667 MemoryRegionSection section = section_from_flat_range(fr, view);
2668
2669 if (fr->dirty_log_mask && listener->log_stop) {
2670 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2671 }
2672 if (listener->region_del) {
2673 listener->region_del(listener, &section);
2674 }
2675 }
2676 if (listener->commit) {
2677 listener->commit(listener);
2678 }
2679 flatview_unref(view);
2680}
2681
d45fa784 2682void memory_listener_register(MemoryListener *listener, AddressSpace *as)
7664e80c 2683{
72e22d2f
AK
2684 MemoryListener *other = NULL;
2685
d45fa784 2686 listener->address_space = as;
72e22d2f 2687 if (QTAILQ_EMPTY(&memory_listeners)
eae3eb3e 2688 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
72e22d2f
AK
2689 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2690 } else {
2691 QTAILQ_FOREACH(other, &memory_listeners, link) {
2692 if (listener->priority < other->priority) {
2693 break;
2694 }
2695 }
2696 QTAILQ_INSERT_BEFORE(other, listener, link);
2697 }
0d673e36 2698
9a54635d 2699 if (QTAILQ_EMPTY(&as->listeners)
eae3eb3e 2700 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
9a54635d
PB
2701 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2702 } else {
2703 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2704 if (listener->priority < other->priority) {
2705 break;
2706 }
2707 }
2708 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2709 }
2710
d45fa784 2711 listener_add_address_space(listener, as);
7664e80c
AK
2712}
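
/*
 * Illustrative usage sketch (hypothetical listener, callbacks and priority):
 * a minimal MemoryListener tracking topology changes on one address space.
 * Registration replays the current FlatView through region_add, as done by
 * listener_add_address_space() above.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* A real listener would e.g. populate an IOTLB or a KVM slot here. */
}

static void example_region_del(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* ...and tear the corresponding state down here. */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .region_del = example_region_del,
    .priority = 10,
};

static void example_track_address_space(AddressSpace *as)
{
    memory_listener_register(&example_listener, as);
}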
2713
2714void memory_listener_unregister(MemoryListener *listener)
2715{
1d8280c1
PB
2716 if (!listener->address_space) {
2717 return;
2718 }
2719
d25836ca 2720 listener_del_address_space(listener, listener->address_space);
72e22d2f 2721 QTAILQ_REMOVE(&memory_listeners, listener, link);
9a54635d 2722 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
1d8280c1 2723 listener->address_space = NULL;
86e775c6 2724}
e2177955 2725
7dca8043 2726void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
1c0ffa58 2727{
ac95190e 2728 memory_region_ref(root);
8786db7c 2729 as->root = root;
67ace39b 2730 as->current_map = NULL;
4c19eb72
AK
2731 as->ioeventfd_nb = 0;
2732 as->ioeventfds = NULL;
9a54635d 2733 QTAILQ_INIT(&as->listeners);
0d673e36 2734 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
7dca8043 2735 as->name = g_strdup(name ? name : "anonymous");
202fc01b
AK
2736 address_space_update_topology(as);
2737 address_space_update_ioeventfds(as);
1c0ffa58 2738}
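
/*
 * Illustrative usage sketch (the static AddressSpace, the "dma_root" region
 * and the name string are hypothetical): giving a DMA-capable device its
 * own address space rooted at a dedicated region.
 */
static AddressSpace example_dma_as;

static void example_create_dma_as(MemoryRegion *dma_root)
{
    address_space_init(&example_dma_as, dma_root, "example-dma");
}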
658b2224 2739
374f2981 2740static void do_address_space_destroy(AddressSpace *as)
83f3c251 2741{
9a54635d 2742 assert(QTAILQ_EMPTY(&as->listeners));
078c44f4 2743
856d7245 2744 flatview_unref(as->current_map);
7dca8043 2745 g_free(as->name);
4c19eb72 2746 g_free(as->ioeventfds);
ac95190e 2747 memory_region_unref(as->root);
83f3c251
AK
2748}
2749
374f2981
PB
2750void address_space_destroy(AddressSpace *as)
2751{
ac95190e
PB
2752 MemoryRegion *root = as->root;
2753
374f2981
PB
2754 /* Flush out anything from MemoryListeners listening in on this */
2755 memory_region_transaction_begin();
2756 as->root = NULL;
2757 memory_region_transaction_commit();
2758 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2759
2760 /* At this point, as->dispatch and as->current_map are dummy
2761 * entries that the guest should never use. Wait for the old
2762 * values to expire before freeing the data.
2763 */
ac95190e 2764 as->root = root;
374f2981
PB
2765 call_rcu(as, do_address_space_destroy, rcu);
2766}
2767
4e831901
PX
2768static const char *memory_region_type(MemoryRegion *mr)
2769{
2770 if (memory_region_is_ram_device(mr)) {
2771 return "ramd";
2772 } else if (memory_region_is_romd(mr)) {
2773 return "romd";
2774 } else if (memory_region_is_rom(mr)) {
2775 return "rom";
2776 } else if (memory_region_is_ram(mr)) {
2777 return "ram";
2778 } else {
2779 return "i/o";
2780 }
2781}
2782
314e2987
BS
2783typedef struct MemoryRegionList MemoryRegionList;
2784
2785struct MemoryRegionList {
2786 const MemoryRegion *mr;
a16878d2 2787 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
314e2987
BS
2788};
2789
b58deb34 2790typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
314e2987 2791
4e831901
PX
2792#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2793 int128_sub((size), int128_one())) : 0)
2794#define MTREE_INDENT " "
2795
fc051ae6
AK
2796static void mtree_expand_owner(fprintf_function mon_printf, void *f,
2797 const char *label, Object *obj)
2798{
2799 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2800
2801 mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
2802 if (dev && dev->id) {
2803 mon_printf(f, " id=%s", dev->id);
2804 } else {
2805 gchar *canonical_path = object_get_canonical_path(obj);
2806 if (canonical_path) {
2807 mon_printf(f, " path=%s", canonical_path);
2808 g_free(canonical_path);
2809 } else {
2810 mon_printf(f, " type=%s", object_get_typename(obj));
2811 }
2812 }
2813 mon_printf(f, "}");
2814}
2815
2816static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
2817 const MemoryRegion *mr)
2818{
2819 Object *owner = mr->owner;
2820 Object *parent = memory_region_owner((MemoryRegion *)mr);
2821
2822 if (!owner && !parent) {
2823 mon_printf(f, " orphan");
2824 return;
2825 }
2826 if (owner) {
2827 mtree_expand_owner(mon_printf, f, "owner", owner);
2828 }
2829 if (parent && parent != owner) {
2830 mtree_expand_owner(mon_printf, f, "parent", parent);
2831 }
2832}
2833
314e2987
BS
2834static void mtree_print_mr(fprintf_function mon_printf, void *f,
2835 const MemoryRegion *mr, unsigned int level,
a8170e5e 2836 hwaddr base,
fc051ae6
AK
2837 MemoryRegionListHead *alias_print_queue,
2838 bool owner)
314e2987 2839{
9479c57a
JK
2840 MemoryRegionList *new_ml, *ml, *next_ml;
2841 MemoryRegionListHead submr_print_queue;
314e2987
BS
2842 const MemoryRegion *submr;
2843 unsigned int i;
b31f8412 2844 hwaddr cur_start, cur_end;
314e2987 2845
f8a9f720 2846 if (!mr) {
314e2987
BS
2847 return;
2848 }
2849
2850 for (i = 0; i < level; i++) {
4e831901 2851 mon_printf(f, MTREE_INDENT);
314e2987
BS
2852 }
2853
b31f8412
PX
2854 cur_start = base + mr->addr;
2855 cur_end = cur_start + MR_SIZE(mr->size);
2856
2857 /*
2858 * Try to detect overflow of the memory region.  This should never
2859 * happen normally.  When it does, print a marker to warn the
2860 * user who is inspecting the tree.
2861 */
2862 if (cur_start < base || cur_end < cur_start) {
2863 mon_printf(f, "[DETECTED OVERFLOW!] ");
2864 }
2865
314e2987
BS
2866 if (mr->alias) {
2867 MemoryRegionList *ml;
2868 bool found = false;
2869
2870 /* check if the alias is already in the queue */
a16878d2 2871 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
f54bb15f 2872 if (ml->mr == mr->alias) {
314e2987
BS
2873 found = true;
2874 }
2875 }
2876
2877 if (!found) {
2878 ml = g_new(MemoryRegionList, 1);
2879 ml->mr = mr->alias;
a16878d2 2880 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
314e2987 2881 }
4896d74b 2882 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
c26763f8 2883 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
fc051ae6 2884 "-" TARGET_FMT_plx "%s",
b31f8412 2885 cur_start, cur_end,
4b474ba7 2886 mr->priority,
c26763f8 2887 mr->nonvolatile ? "nv-" : "",
4e831901 2888 memory_region_type((MemoryRegion *)mr),
3fb18b4d
PC
2889 memory_region_name(mr),
2890 memory_region_name(mr->alias),
314e2987 2891 mr->alias_offset,
4e831901 2892 mr->alias_offset + MR_SIZE(mr->size),
f8a9f720 2893 mr->enabled ? "" : " [disabled]");
fc051ae6
AK
2894 if (owner) {
2895 mtree_print_mr_owner(mon_printf, f, mr);
2896 }
314e2987 2897 } else {
4896d74b 2898 mon_printf(f,
c26763f8 2899 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s%s): %s%s",
b31f8412 2900 cur_start, cur_end,
4b474ba7 2901 mr->priority,
c26763f8 2902 mr->nonvolatile ? "nv-" : "",
4e831901 2903 memory_region_type((MemoryRegion *)mr),
f8a9f720
GH
2904 memory_region_name(mr),
2905 mr->enabled ? "" : " [disabled]");
fc051ae6
AK
2906 if (owner) {
2907 mtree_print_mr_owner(mon_printf, f, mr);
2908 }
314e2987 2909 }
fc051ae6 2910 mon_printf(f, "\n");
9479c57a
JK
2911
2912 QTAILQ_INIT(&submr_print_queue);
2913
314e2987 2914 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
9479c57a
JK
2915 new_ml = g_new(MemoryRegionList, 1);
2916 new_ml->mr = submr;
a16878d2 2917 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
9479c57a
JK
2918 if (new_ml->mr->addr < ml->mr->addr ||
2919 (new_ml->mr->addr == ml->mr->addr &&
2920 new_ml->mr->priority > ml->mr->priority)) {
a16878d2 2921 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
9479c57a
JK
2922 new_ml = NULL;
2923 break;
2924 }
2925 }
2926 if (new_ml) {
a16878d2 2927 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
9479c57a
JK
2928 }
2929 }
2930
a16878d2 2931 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
b31f8412 2932 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
fc051ae6 2933 alias_print_queue, owner);
9479c57a
JK
2934 }
2935
a16878d2 2936 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
9479c57a 2937 g_free(ml);
314e2987
BS
2938 }
2939}
2940
5e8fd947
AK
2941struct FlatViewInfo {
2942 fprintf_function mon_printf;
2943 void *f;
2944 int counter;
2945 bool dispatch_tree;
fc051ae6 2946 bool owner;
5e8fd947
AK
2947};
2948
2949static void mtree_print_flatview(gpointer key, gpointer value,
2950 gpointer user_data)
57bb40c9 2951{
5e8fd947
AK
2952 FlatView *view = key;
2953 GArray *fv_address_spaces = value;
2954 struct FlatViewInfo *fvi = user_data;
2955 fprintf_function p = fvi->mon_printf;
2956 void *f = fvi->f;
57bb40c9
PX
2957 FlatRange *range = &view->ranges[0];
2958 MemoryRegion *mr;
2959 int n = view->nr;
5e8fd947
AK
2960 int i;
2961 AddressSpace *as;
2962
2963 p(f, "FlatView #%d\n", fvi->counter);
2964 ++fvi->counter;
2965
2966 for (i = 0; i < fv_address_spaces->len; ++i) {
2967 as = g_array_index(fv_address_spaces, AddressSpace*, i);
2968 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
2969 if (as->root->alias) {
2970 p(f, ", alias %s", memory_region_name(as->root->alias));
2971 }
2972 p(f, "\n");
2973 }
2974
2975 p(f, " Root memory region: %s\n",
2976 view->root ? memory_region_name(view->root) : "(none)");
57bb40c9
PX
2977
2978 if (n <= 0) {
5e8fd947 2979 p(f, MTREE_INDENT "No rendered FlatView\n\n");
57bb40c9
PX
2980 return;
2981 }
2982
2983 while (n--) {
2984 mr = range->mr;
377a07aa
PB
2985 if (range->offset_in_region) {
2986 p(f, MTREE_INDENT TARGET_FMT_plx "-"
c26763f8 2987 TARGET_FMT_plx " (prio %d, %s%s): %s @" TARGET_FMT_plx,
377a07aa
PB
2988 int128_get64(range->addr.start),
2989 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2990 mr->priority,
c26763f8 2991 range->nonvolatile ? "nv-" : "",
377a07aa
PB
2992 range->readonly ? "rom" : memory_region_type(mr),
2993 memory_region_name(mr),
2994 range->offset_in_region);
2995 } else {
2996 p(f, MTREE_INDENT TARGET_FMT_plx "-"
c26763f8 2997 TARGET_FMT_plx " (prio %d, %s%s): %s",
377a07aa
PB
2998 int128_get64(range->addr.start),
2999 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
3000 mr->priority,
c26763f8 3001 range->nonvolatile ? "nv-" : "",
377a07aa
PB
3002 range->readonly ? "rom" : memory_region_type(mr),
3003 memory_region_name(mr));
3004 }
fc051ae6
AK
3005 if (fvi->owner) {
3006 mtree_print_mr_owner(p, f, mr);
3007 }
3008 p(f, "\n");
57bb40c9
PX
3009 range++;
3010 }
3011
5e8fd947
AK
3012#if !defined(CONFIG_USER_ONLY)
3013 if (fvi->dispatch_tree && view->root) {
3014 mtree_print_dispatch(p, f, view->dispatch, view->root);
3015 }
3016#endif
3017
3018 p(f, "\n");
3019}
3020
3021static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3022 gpointer user_data)
3023{
3024 FlatView *view = key;
3025 GArray *fv_address_spaces = value;
3026
3027 g_array_unref(fv_address_spaces);
57bb40c9 3028 flatview_unref(view);
5e8fd947
AK
3029
3030 return true;
57bb40c9
PX
3031}
3032
5e8fd947 3033void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
fc051ae6 3034 bool dispatch_tree, bool owner)
314e2987
BS
3035{
3036 MemoryRegionListHead ml_head;
3037 MemoryRegionList *ml, *ml2;
0d673e36 3038 AddressSpace *as;
314e2987 3039
57bb40c9 3040 if (flatview) {
5e8fd947
AK
3041 FlatView *view;
3042 struct FlatViewInfo fvi = {
3043 .mon_printf = mon_printf,
3044 .f = f,
3045 .counter = 0,
fc051ae6
AK
3046 .dispatch_tree = dispatch_tree,
3047 .owner = owner,
5e8fd947
AK
3048 };
3049 GArray *fv_address_spaces;
3050 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3051
3052 /* Gather all FVs in one table */
57bb40c9 3053 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
5e8fd947
AK
3054 view = address_space_get_flatview(as);
3055
3056 fv_address_spaces = g_hash_table_lookup(views, view);
3057 if (!fv_address_spaces) {
3058 fv_address_spaces = g_array_new(false, false, sizeof(as));
3059 g_hash_table_insert(views, view, fv_address_spaces);
3060 }
3061
3062 g_array_append_val(fv_address_spaces, as);
57bb40c9 3063 }
5e8fd947
AK
3064
3065 /* Print */
3066 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3067
3068 /* Free */
3069 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3070 g_hash_table_unref(views);
3071
57bb40c9
PX
3072 return;
3073 }
3074
314e2987
BS
3075 QTAILQ_INIT(&ml_head);
3076
0d673e36 3077 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
e48816aa 3078 mon_printf(f, "address-space: %s\n", as->name);
fc051ae6 3079 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
e48816aa 3080 mon_printf(f, "\n");
b9f9be88
BS
3081 }
3082
314e2987 3083 /* print aliased regions */
a16878d2 3084 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
e48816aa 3085 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
fc051ae6 3086 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
e48816aa 3087 mon_printf(f, "\n");
314e2987
BS
3088 }
3089
a16878d2 3090 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
88365e47 3091 g_free(ml);
314e2987 3092 }
314e2987 3093}
b4fefef9 3094
b08199c6
PM
3095void memory_region_init_ram(MemoryRegion *mr,
3096 struct Object *owner,
3097 const char *name,
3098 uint64_t size,
3099 Error **errp)
3100{
3101 DeviceState *owner_dev;
3102 Error *err = NULL;
3103
3104 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3105 if (err) {
3106 error_propagate(errp, err);
3107 return;
3108 }
3109 /* This will assert if owner is neither NULL nor a DeviceState.
3110 * We only want the owner here for the purposes of defining a
3111 * unique name for migration. TODO: Ideally we should implement
3112 * a naming scheme for Objects which are not DeviceStates, in
3113 * which case we can relax this restriction.
3114 */
3115 owner_dev = DEVICE(owner);
3116 vmstate_register_ram(mr, owner_dev);
3117}
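
/*
 * Illustrative usage sketch (hypothetical device, field and size): the usual
 * call pattern from a device's realize method.  Passing the device as owner
 * lets the helper above register the RAM for migration under a unique name.
 */
static void example_device_realize_ram(DeviceState *dev, MemoryRegion *ram,
                                       Error **errp)
{
    memory_region_init_ram(ram, OBJECT(dev), "example-device.ram",
                           64 * 1024, errp);
}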
3118
3119void memory_region_init_rom(MemoryRegion *mr,
3120 struct Object *owner,
3121 const char *name,
3122 uint64_t size,
3123 Error **errp)
3124{
3125 DeviceState *owner_dev;
3126 Error *err = NULL;
3127
3128 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3129 if (err) {
3130 error_propagate(errp, err);
3131 return;
3132 }
3133 /* This will assert if owner is neither NULL nor a DeviceState.
3134 * We only want the owner here for the purposes of defining a
3135 * unique name for migration. TODO: Ideally we should implement
3136 * a naming scheme for Objects which are not DeviceStates, in
3137 * which case we can relax this restriction.
3138 */
3139 owner_dev = DEVICE(owner);
3140 vmstate_register_ram(mr, owner_dev);
3141}
3142
3143void memory_region_init_rom_device(MemoryRegion *mr,
3144 struct Object *owner,
3145 const MemoryRegionOps *ops,
3146 void *opaque,
3147 const char *name,
3148 uint64_t size,
3149 Error **errp)
3150{
3151 DeviceState *owner_dev;
3152 Error *err = NULL;
3153
3154 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3155 name, size, &err);
3156 if (err) {
3157 error_propagate(errp, err);
3158 return;
3159 }
3160 /* This will assert if owner is neither NULL nor a DeviceState.
3161 * We only want the owner here for the purposes of defining a
3162 * unique name for migration. TODO: Ideally we should implement
3163 * a naming scheme for Objects which are not DeviceStates, in
3164 * which case we can relax this restriction.
3165 */
3166 owner_dev = DEVICE(owner);
3167 vmstate_register_ram(mr, owner_dev);
3168}
3169
b4fefef9
PC
3170static const TypeInfo memory_region_info = {
3171 .parent = TYPE_OBJECT,
3172 .name = TYPE_MEMORY_REGION,
3173 .instance_size = sizeof(MemoryRegion),
3174 .instance_init = memory_region_initfn,
3175 .instance_finalize = memory_region_finalize,
3176};
3177
3df9d748
AK
3178static const TypeInfo iommu_memory_region_info = {
3179 .parent = TYPE_MEMORY_REGION,
3180 .name = TYPE_IOMMU_MEMORY_REGION,
1221a474 3181 .class_size = sizeof(IOMMUMemoryRegionClass),
3df9d748
AK
3182 .instance_size = sizeof(IOMMUMemoryRegion),
3183 .instance_init = iommu_memory_region_initfn,
1221a474 3184 .abstract = true,
3df9d748
AK
3185};
3186
b4fefef9
PC
3187static void memory_register_types(void)
3188{
3189 type_register_static(&memory_region_info);
3df9d748 3190 type_register_static(&iommu_memory_region_info);
b4fefef9
PC
3191}
3192
3193type_init(memory_register_types)