/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

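/* Total order on ioeventfds: first by address range, then by match_data,
 * data and notifier.  address_space_add_del_ioeventfds() walks two arrays
 * in this order to compute their symmetric difference.
 */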
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

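/* Express a FlatRange as the MemoryRegionSection that listeners see. */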
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

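/* Two FlatRanges can be merged when they are contiguous both in the
 * address space and in the underlying region, and carry identical
 * attributes (dirty logging, ROMD mode, read-only).
 */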
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

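/* Byte-swap *data when the region's declared endianness differs from the
 * target's, so that device models see values in their own byte order.
 */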
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

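/* Split an access that is wider (or narrower) than what the device model
 * implements into a sequence of accesses of a supported size, combining
 * the partial results in *value with shifts and masks that respect the
 * region's endianness.
 */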
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

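/* Follow alias chains and single-child containers down to the region that
 * actually determines the flat view, so that address spaces with the same
 * effective root can share a FlatView.  Returns NULL when the tree renders
 * to nothing (e.g. everything in it is disabled).
 */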
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

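/* Compare the old and new flat views in a single merge-style walk.  With
 * adding == false, notify listeners about ranges that are going away; with
 * adding == true, notify about new ranges and about dirty-logging changes
 * on ranges that survived.
 */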
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

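/* Drop every cached FlatView and render one fresh view per unique
 * effective root among the registered address spaces.
 */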
static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

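/* Switch @as to the FlatView cached for its effective root, telling the
 * address space's listeners about the differences from the old view.
 */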
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

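/* Escape the QOM-special characters matched above as "\xNN" so that the
 * region name is usable as a QOM path component.
 */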
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

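/* Check an access against the region's .valid constraints: alignment, and
 * the optional accepts() callback applied to each chunk the access will
 * be broken into.
 */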
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it would slow down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

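/* Recompute the union of the flags requested by all registered notifiers
 * and inform the IOMMU implementation when that union changes.
 */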
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

cdb30812
PX
1789void memory_region_register_iommu_notifier(MemoryRegion *mr,
1790 IOMMUNotifier *n)
06866575 1791{
3df9d748
AK
1792 IOMMUMemoryRegion *iommu_mr;
1793
efcd38c5
JW
1794 if (mr->alias) {
1795 memory_region_register_iommu_notifier(mr->alias, n);
1796 return;
1797 }
1798
cdb30812 1799 /* We need to register for at least one bitfield */
3df9d748 1800 iommu_mr = IOMMU_MEMORY_REGION(mr);
cdb30812 1801 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
698feb5e 1802 assert(n->start <= n->end);
3df9d748
AK
1803 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1804 memory_region_update_iommu_notify_flags(iommu_mr);
06866575
DG
1805}
1806
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}
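
/* Sketch of the intended call sequence (hypothetical device code):
 * initialize a notifier over the whole address range, register it, and
 * replay existing mappings so the consumer starts out in sync.
 */
static void example_watch_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUNotifier *n, IOMMUNotify fn)
{
    iommu_notifier_init(n, fn, IOMMU_NOTIFIER_ALL, 0, HWADDR_MAX);
    memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), n);
    memory_region_iommu_replay(iommu_mr, n);
}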

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if it does not overlap with the
     * registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(
            memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
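
/* Sketch (hypothetical display code): take a dirty snapshot of video RAM
 * and redraw only the pages that changed -- the pattern these two helpers
 * are designed for.
 */
static void example_refresh(MemoryRegion *vram, hwaddr page)
{
    DirtyBitmapSnapshot *snap =
        memory_region_snapshot_and_clear_dirty(vram, 0,
                                               memory_region_size(vram),
                                               DIRTY_MEMORY_VGA);
    hwaddr addr;

    for (addr = 0; addr < memory_region_size(vram); addr += page) {
        if (memory_region_snapshot_get_dirty(vram, snap, addr, page)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    g_free(snap);
}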

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
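
/* Sketch (hypothetical device): bind a doorbell register to an
 * EventNotifier so that 4-byte guest writes of the value 1 are routed
 * straight to the notifier (via KVM ioeventfd when available).
 */
static void example_add_doorbell(MemoryRegion *mr, hwaddr offset,
                                 EventNotifier *e)
{
    event_notifier_init(e, 0);
    memory_region_add_eventfd(mr, offset, 4, true, 1, e);
}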

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
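
/* Sketch (hypothetical board code): background RAM at priority 0 with a
 * higher-priority MMIO window overlapping it; the window shadows the RAM
 * underneath for as long as it is enabled.  The address is illustrative.
 */
static void example_layout(MemoryRegion *sysmem, MemoryRegion *ram,
                           MemoryRegion *mmio_window)
{
    memory_region_add_subregion(sysmem, 0, ram);
    memory_region_add_subregion_overlap(sysmem, 0xfee00000, mmio_window, 1);
}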

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
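
/* Sketch: several topology changes batched in one transaction, so flat
 * views and ioeventfds are recomputed only once, at commit time.
 */
static void example_move_window(MemoryRegion *old_mr, MemoryRegion *new_mr,
                                hwaddr new_addr)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(old_mr, false);
    memory_region_set_address(new_mr, new_addr);
    memory_region_set_enabled(new_mr, true);
    memory_region_transaction_commit();    /* single topology update */
}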

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
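
/* Sketch (hypothetical caller): memory_region_find() returns a referenced
 * section, so the reference must be dropped once the result is consumed.
 */
static bool example_backed_by_ram(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(root, addr, 1);
    bool ram = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return ram;
}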

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
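
/* Sketch (hypothetical migration-like user): one round of global dirty
 * tracking -- enable logging, let the guest run, fold the accumulated
 * bits into the dirty bitmaps, then disable logging again.
 */
static void example_dirty_round(void)
{
    memory_global_dirty_log_start();
    /* ... guest runs, pages get marked dirty ... */
    memory_global_dirty_log_sync();
    memory_global_dirty_log_stop();
}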

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
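
/* Sketch (hypothetical client): a minimal listener.  region_add fires for
 * every FlatRange already present at registration time and again on later
 * topology changes; the priority value here is illustrative and simply
 * orders this listener relative to others.
 */
static void example_region_add(MemoryListener *l, MemoryRegionSection *s)
{
    /* runs under the BQL; inspect s->mr, s->offset_within_address_space */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

/* memory_listener_register(&example_listener, &address_space_memory); */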

bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback calls
     * memory_region_invalidate_mmio_ptr, which is likely when we use
     * a cache.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find adds a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
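
/* Sketch (hypothetical device): give a DMA engine its own address space
 * rooted at a region of its choosing; the name is illustrative, and the
 * space would be torn down with address_space_destroy() on unrealize.
 */
static void example_create_dma_as(AddressSpace *as, MemoryRegion *root)
{
    address_space_init(as, root, "example-dma");
    /* ... issue address_space_rw()/address_space_map() against it ... */
}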

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region.  This should never
     * happen normally; when it does, print a marker so the user
     * observing the output is warned.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
2928}
2929
5e8fd947
AK
2930struct FlatViewInfo {
2931 fprintf_function mon_printf;
2932 void *f;
2933 int counter;
2934 bool dispatch_tree;
2935};
2936
2937static void mtree_print_flatview(gpointer key, gpointer value,
2938 gpointer user_data)
57bb40c9 2939{
5e8fd947
AK
2940 FlatView *view = key;
2941 GArray *fv_address_spaces = value;
2942 struct FlatViewInfo *fvi = user_data;
2943 fprintf_function p = fvi->mon_printf;
2944 void *f = fvi->f;
57bb40c9
PX
2945 FlatRange *range = &view->ranges[0];
2946 MemoryRegion *mr;
2947 int n = view->nr;
5e8fd947
AK
2948 int i;
2949 AddressSpace *as;
2950
2951 p(f, "FlatView #%d\n", fvi->counter);
2952 ++fvi->counter;
2953
2954 for (i = 0; i < fv_address_spaces->len; ++i) {
2955 as = g_array_index(fv_address_spaces, AddressSpace*, i);
2956 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
2957 if (as->root->alias) {
2958 p(f, ", alias %s", memory_region_name(as->root->alias));
2959 }
2960 p(f, "\n");
2961 }
2962
2963 p(f, " Root memory region: %s\n",
2964 view->root ? memory_region_name(view->root) : "(none)");
57bb40c9
PX
2965
2966 if (n <= 0) {
5e8fd947 2967 p(f, MTREE_INDENT "No rendered FlatView\n\n");
57bb40c9
PX
2968 return;
2969 }
2970
2971 while (n--) {
2972 mr = range->mr;
377a07aa
PB
2973 if (range->offset_in_region) {
2974 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2975 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2976 int128_get64(range->addr.start),
2977 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2978 mr->priority,
2979 range->readonly ? "rom" : memory_region_type(mr),
2980 memory_region_name(mr),
2981 range->offset_in_region);
2982 } else {
2983 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2984 TARGET_FMT_plx " (prio %d, %s): %s\n",
2985 int128_get64(range->addr.start),
2986 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2987 mr->priority,
2988 range->readonly ? "rom" : memory_region_type(mr),
2989 memory_region_name(mr));
2990 }
57bb40c9
PX
2991 range++;
2992 }
2993
5e8fd947
AK
2994#if !defined(CONFIG_USER_ONLY)
2995 if (fvi->dispatch_tree && view->root) {
2996 mtree_print_dispatch(p, f, view->dispatch, view->root);
2997 }
2998#endif
2999
3000 p(f, "\n");
3001}
3002
3003static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3004 gpointer user_data)
3005{
3006 FlatView *view = key;
3007 GArray *fv_address_spaces = value;
3008
3009 g_array_unref(fv_address_spaces);
57bb40c9 3010 flatview_unref(view);
5e8fd947
AK
3011
3012 return true;
57bb40c9
PX
3013}
3014
5e8fd947
AK
3015void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
3016 bool dispatch_tree)
314e2987
BS
3017{
3018 MemoryRegionListHead ml_head;
3019 MemoryRegionList *ml, *ml2;
0d673e36 3020 AddressSpace *as;
314e2987 3021
57bb40c9 3022 if (flatview) {
5e8fd947
AK
3023 FlatView *view;
3024 struct FlatViewInfo fvi = {
3025 .mon_printf = mon_printf,
3026 .f = f,
3027 .counter = 0,
3028 .dispatch_tree = dispatch_tree
3029 };
3030 GArray *fv_address_spaces;
3031 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3032
3033 /* Gather all FVs in one table */
57bb40c9 3034 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
5e8fd947
AK
3035 view = address_space_get_flatview(as);
3036
3037 fv_address_spaces = g_hash_table_lookup(views, view);
3038 if (!fv_address_spaces) {
3039 fv_address_spaces = g_array_new(false, false, sizeof(as));
3040 g_hash_table_insert(views, view, fv_address_spaces);
3041 }
3042
3043 g_array_append_val(fv_address_spaces, as);
57bb40c9 3044 }
5e8fd947
AK
3045
3046 /* Print */
3047 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3048
3049 /* Free */
3050 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3051 g_hash_table_unref(views);
3052
57bb40c9
PX
3053 return;
3054 }
3055
314e2987
BS
3056 QTAILQ_INIT(&ml_head);
3057
0d673e36 3058 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
e48816aa
GH
3059 mon_printf(f, "address-space: %s\n", as->name);
3060 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
3061 mon_printf(f, "\n");
b9f9be88
BS
3062 }
3063
314e2987 3064 /* print aliased regions */
a16878d2 3065 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
e48816aa
GH
3066 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3067 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
3068 mon_printf(f, "\n");
314e2987
BS
3069 }
3070
a16878d2 3071 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
88365e47 3072 g_free(ml);
314e2987 3073 }
314e2987 3074}

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
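
/* Sketch (hypothetical device realize): the migratable variant uses the
 * owning device to derive a unique vmstate name, so passing the device as
 * owner is the normal pattern.  Names and sizes here are illustrative.
 */
static void example_realize(DeviceState *dev, Error **errp)
{
    MemoryRegion *mr = g_new0(MemoryRegion, 1);

    memory_region_init_ram(mr, OBJECT(dev), "example.ram", 64 * 1024, errp);
}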

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)