/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

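/*
 * Example (illustrative only, not part of the original file): these helpers
 * treat a range as the half-open interval [start, start + size), so two
 * ranges that merely touch do not intersect:
 *
 *     AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x1000));
 *     AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x1000));
 *     // [0x1000, 0x2000) and [0x2000, 0x3000) only touch:
 *     // addrrange_intersects(a, b) == false
 *
 *     AddrRange c = addrrange_make(int128_make64(0x1fff), int128_make64(0x1000));
 *     // [0x1fff, 0x2fff) overlaps a in its last byte:
 *     // addrrange_intersects(a, c) == true
 */
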
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

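/*
 * Example (illustrative only): two FlatRanges covering [0x0, 0x1000) and
 * [0x1000, 0x2000), backed by the same MemoryRegion at consecutive
 * offset_in_region values and with identical dirty_log_mask, romd_mode and
 * readonly attributes, satisfy can_merge() and collapse into a single
 * FlatRange [0x0, 0x2000) during flatview_simplify().
 */
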
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

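/*
 * Worked example (illustrative only): for a region whose implementation
 * supports at most 4-byte accesses (mr->ops->impl.max_access_size == 4),
 * an 8-byte read on a little-endian region is split into two 4-byte calls
 * to the accessor:
 *
 *     access(mr, addr + 0, value, 4, 0,  0xffffffffULL, attrs);
 *     access(mr, addr + 4, value, 4, 32, 0xffffffffULL, attrs);
 *
 * Each accessor ORs (tmp & mask) << shift into *value, reassembling the
 * full 64-bit result; on a big-endian region the shifts are reversed.
 */
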
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

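/*
 * Example (illustrative only): a root container with RAM at [0x0, 0x8000)
 * and a higher-priority MMIO subregion overlapping it at [0x4000, 0x5000)
 * flattens into three FlatRanges:
 *
 *     RAM  [0x0,    0x4000)
 *     MMIO [0x4000, 0x5000)
 *     RAM  [0x5000, 0x8000)
 *
 * The MMIO region "punches a hole" in the RAM range because higher-priority
 * subregions are rendered first and RAM only fills the remaining gaps.
 */
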
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

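/*
 * Callers pair this with flatview_unref() once they are done iterating,
 * following the pattern used throughout this file (illustrative only):
 *
 *     FlatView *view = address_space_get_flatview(as);
 *     FlatRange *fr;
 *
 *     FOR_EACH_FLAT_RANGE(fr, view) {
 *         // inspect fr->mr, fr->addr, ...
 *     }
 *     flatview_unref(view);
 */
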
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}


static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}

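/*
 * Usage sketch (illustrative only): callers batch mapping changes inside a
 * transaction so listeners see a single topology update instead of one per
 * change. Transactions nest; only the outermost commit triggers the update.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_set_address(mr_b, 0xfee00000);
 *     memory_region_transaction_commit();    // one begin/commit round
 */
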
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

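/*
 * Example (illustrative only): characters that are special in QOM path
 * names are rewritten as "\xNN" (4 bytes per escaped character, as counted
 * in the first loop above), so a region named "pci[0]/mem" becomes
 * "pci\x5b0\x5d\x2fmem".
 */
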
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

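/*
 * Typical usage (illustrative sketch; the device state "s", the callbacks
 * and the 0x1000 size are hypothetical):
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "my-device-mmio", 0x1000);
 *
 * The region is then mapped into a container with
 * memory_region_add_subregion().
 */
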
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

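/*
 * Example (illustrative only; concrete names and sizes are hypothetical):
 * an alias exposes a window into another region, e.g. mirroring the top
 * 128KiB of a 256KiB "bios" region into the ISA space:
 *
 *     memory_region_init_alias(&isa_bios, NULL, "isa-bios", &bios,
 *                              0x20000, 0x20000);
 *
 * An access to the alias at offset 0 then hits "bios" at offset
 * alias_offset + 0 = 0x20000.
 */
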
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&mr->iommu_notify);
    mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner. As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

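/*
 * Example (illustrative only): with global dirty logging enabled during
 * migration, a RAM region that also has VGA logging active reports
 *
 *     mask == (1 << DIRTY_MEMORY_VGA) | (1 << DIRTY_MEMORY_MIGRATION)
 *
 * so memory_region_is_logging(mr, DIRTY_MEMORY_VGA) returns true.
 */
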
static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != mr->iommu_notify_flags &&
        mr->iommu_ops->notify_flag_changed) {
        mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
                                           flags);
    }

    mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    /* We need to register for at least one bitfield */
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(mr);
}

uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
                                bool is_write)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here. This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    QLIST_REMOVE(n, node);
    memory_region_update_iommu_notify_flags(mr);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;
    IOMMUNotifierFlag request_flags;

    assert(memory_region_is_iommu(mr));

    if (entry.perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        if (iommu_notifier->notifier_flags & request_flags) {
            iommu_notifier->notify(iommu_notifier, &entry);
        }
    }
}

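/*
 * Usage sketch (illustrative only; the callback name is hypothetical): a
 * consumer such as VFIO fills in an IOMMUNotifier and registers it on the
 * IOMMU region so it receives MAP/UNMAP events via its notify callback:
 *
 *     IOMMUNotifier n = {
 *         .notify = my_iommu_map_notify,
 *         .notifier_flags = IOMMU_NOTIFIER_ALL,
 *     };
 *     memory_region_register_iommu_notifier(mr, &n);
 *     memory_region_iommu_replay(mr, &n, false);   // catch up on mappings
 */
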
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}


void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times. But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

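/*
 * Illustrative sketch, not part of the original file: writing guest RAM
 * directly through the host pointer and then marking the bytes dirty so
 * that migration and display code notice the change.
 */
static void example_poke_ram(MemoryRegion *ram, hwaddr offset, uint8_t val)
{
    uint8_t *host = memory_region_get_ram_ptr(ram);

    host[offset] = val;
    memory_region_set_dirty(ram, offset, 1);
}
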
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

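/*
 * Illustrative sketch, not part of the original file: enabling coalesced
 * MMIO for a write-mostly register window, as network devices do for
 * descriptor tail registers.  The window offset and size are made up.
 */
static void example_enable_coalescing(MemoryRegion *mmio)
{
    /* Writes to 0x100..0x1ff are batched in the kernel's ring buffer and
     * replayed by qemu_flush_coalesced_mmio_buffer() before the next
     * non-coalesced access to this region.
     */
    memory_region_add_coalescing(mmio, 0x100, 0x100);
}
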
void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

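/*
 * Illustrative sketch, not part of the original file: a device whose
 * MMIO handlers do their own locking can opt out of the big QEMU lock.
 */
static void example_mark_lockless(MemoryRegion *mmio)
{
    /* The region's callbacks must now be thread-safe on their own. */
    memory_region_clear_global_locking(mmio);
}
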
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    /* the "+ 1" keeps the allocation non-empty when the last eventfd
     * is removed, so g_realloc() does not free the array
     */
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

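/*
 * Illustrative sketch, not part of the original file: a virtio-style
 * doorbell.  A write of the queue index to a notify register wakes an
 * I/O thread through an eventfd instead of trapping into the device
 * model.  The register offset and matched value are hypothetical.
 */
static void example_wire_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    event_notifier_init(e, 0);
    /* A 2-byte write of the value 0 at offset 0x10 signals the notifier. */
    memory_region_add_eventfd(mmio, 0x10, 2, true, 0, e);
}
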
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

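/*
 * Illustrative sketch, not part of the original file: composing a
 * board-level address map.  RAM covers the low addresses and an MMIO
 * region is layered over it with a higher priority, so the MMIO window
 * wins where the two overlap.  Addresses are hypothetical.
 */
static void example_build_map(MemoryRegion *sysmem,
                              MemoryRegion *ram, MemoryRegion *mmio)
{
    memory_region_add_subregion(sysmem, 0, ram);
    memory_region_add_subregion_overlap(sysmem, 0xfee00000, mmio, 1);
}
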
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

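/*
 * Illustrative sketch, not part of the original file: classic ROM
 * banking.  A fixed window aliases into a larger ROM, and writing the
 * bank register just moves the alias offset; the flat view is rebuilt
 * inside the transaction.  The 16KiB bank size is hypothetical.
 */
static void example_switch_bank(MemoryRegion *window_alias, unsigned bank)
{
    memory_region_set_alias_offset(window_alias, (hwaddr)bank * 0x4000);
}
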
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

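/*
 * Illustrative sketch, not part of the original file: resolving what is
 * mapped at a guest-physical address.  memory_region_find() took a
 * reference on the region for us, so drop it when done.
 */
static void example_probe(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection mrs = memory_region_find(root, addr, 4);

    if (mrs.mr) {
        /* mrs.offset_within_region says where addr fell inside mrs.mr */
        memory_region_unref(mrs.mr);
    }
}
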
void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;

    /* Keep both the global list and the per-address-space list sorted
     * by ascending priority.
     */
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
}

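/*
 * Illustrative sketch, not part of the original file: a minimal listener
 * observing every range in an address space's flat view.  Registration
 * immediately replays the current view through region_add, as
 * listener_add_address_space() above shows.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* e.g. mirror the mapping into a vhost or KVM memory-slot table */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

/* usage: memory_listener_register(&example_listener, &address_space_memory); */
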
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

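/*
 * Illustrative sketch, not part of the original file: a per-device DMA
 * address space rooted at the device's own region, as PCI bus mastering
 * does.  The root keeps a reference for the address space's lifetime,
 * and destruction is deferred through RCU as shown above.
 */
static AddressSpace example_dma_as;

static void example_setup_dma(MemoryRegion *bus_master_root)
{
    address_space_init(&example_dma_as, bus_master_root, "example-dma");
    /* ... later: address_space_destroy(&example_dma_as); */
}
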
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                      : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                      : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    /* sort subregions by ascending address, then by descending priority */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}

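/*
 * Illustrative "info mtree" output in the format the printers above
 * produce (addresses and region names are from a hypothetical machine):
 *
 *   address-space: memory
 *     0000000000000000-ffffffffffffffff (prio 0, RW): system
 *       0000000000000000-0000000007ffffff (prio 0, RW): ram
 *       00000000fee00000-00000000fee00fff (prio 1, RW): mmio
 */
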
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)