/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    do {                                                               \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);     \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);        \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

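/*
 * memory_region_ioeventfd_before() defines a strict ordering over
 * (range start, range size, match_data, data, notifier), so two
 * ioeventfds compare equal exactly when neither sorts before the
 * other; memory_region_ioeventfd_equal() is derived that way rather
 * than by comparing every field a second time.
 */
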
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

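/*
 * Worked example for can_merge()/flatview_simplify(): two FlatRanges of
 * the same MemoryRegion covering [0x1000, 0x2000) and [0x2000, 0x3000),
 * with offset_in_region values 0x0 and 0x1000 and identical
 * dirty_log_mask/romd_mode/readonly attributes, collapse into a single
 * range [0x1000, 0x3000).  Ranges that differ in any attribute, or that
 * are not contiguous both physically and within the region, are left
 * untouched.
 */
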
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

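/*
 * Example: for a little-endian region whose implementation accepts at
 * most 4-byte accesses (impl.max_access_size == 4), an 8-byte read is
 * split into two sub-accesses that are recombined through mask/shift:
 *
 *     access(mr, addr + 0, value, 4, 0,  0xffffffffULL, attrs);   // low half
 *     access(mr, addr + 4, value, 4, 32, 0xffffffffULL, attrs);   // high half
 *
 * (0xffffffffULL is -1ULL >> (64 - 4 * 8).)  For a big-endian region the
 * shifts are swapped, so the sub-access at the lowest address fills the
 * most significant bits.
 */
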
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

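/*
 * Flattening sketch (illustrative addresses): a 4 GiB RAM region at 0
 * with a higher-priority 4 KiB MMIO sibling at 0xfee00000 renders as
 * three disjoint FlatRanges:
 *
 *     [0x00000000, 0xfee00000)    RAM
 *     [0xfee00000, 0xfee01000)    MMIO (rendered first, obscures the RAM)
 *     [0xfee01000, 0x100000000)   RAM
 *
 * because subregions are rendered in priority order and each render only
 * fills gaps left by the ranges already in the view.
 */
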
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

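/*
 * The two passes above deliberately run in opposite directions: the
 * first (adding == false) walks listeners in Reverse and fires
 * region_del for ranges that disappear, the second (adding == true)
 * walks Forward and fires region_add/region_nop plus log_start/log_stop
 * for ranges that appear or survive.  Every listener therefore observes
 * all deletions before any insertion for a given topology change.
 */
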
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

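/*
 * Usage sketch (names are illustrative): several layout changes can be
 * batched so that the FlatView rebuild and listener notification happen
 * once, at the outermost commit:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();
 *
 * Transactions nest via memory_region_transaction_depth, and the caller
 * must hold the iothread mutex (see the asserts above).
 */
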
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

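/*
 * Escaping example: QOM path components must not contain '/', '[', ']'
 * or '\', so a region named "pci/ioport[0]" is stored as
 * "pci\x2fioport\x5b0\x5d" -- each offending byte expands to a four-byte
 * "\xNN" sequence, which is why the size pass above counts "? 4 : 1".
 */
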
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

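/*
 * Usage sketch for memory_region_init_io(); the "demo" device, its state
 * and register layout are hypothetical:
 *
 *     static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         DemoState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void demo_write(void *opaque, hwaddr addr, uint64_t val,
 *                            unsigned size)
 *     {
 *         DemoState *s = opaque;
 *         s->regs[addr >> 2] = val;
 *     }
 *
 *     static const MemoryRegionOps demo_ops = {
 *         .read = demo_read,
 *         .write = demo_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl = { .min_access_size = 4, .max_access_size = 4 },
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &demo_ops, s,
 *                           "demo-mmio", 0x1000);
 *
 * Accesses narrower or wider than .impl allows are split and recombined
 * by access_with_adjusted_size() before the callbacks run.
 */
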
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

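/*
 * None of the *_nomigrate/_from_file/_from_fd variants register the RAM
 * for migration; a device that wants the contents migrated pairs the
 * init with vmstate_register_ram(), roughly ("demo.ram" and the device
 * are illustrative):
 *
 *     memory_region_init_ram_nomigrate(mr, OBJECT(dev), "demo.ram", size,
 *                                      &error_fatal);
 *     vmstate_register_ram(mr, DEVICE(dev));
 */
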
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

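/*
 * Alias sketch (illustrative values): exposing a 128 KiB window of a
 * BIOS region a second time at 0xe0000 shares the backing RAM instead
 * of copying it:
 *
 *     memory_region_init_alias(isa_bios, owner, "isa-bios", bios,
 *                              bios_size - 0x20000, 0x20000);
 *     memory_region_add_subregion(system_memory, 0xe0000, isa_bios);
 *
 * render_memory_region() resolves the alias while flattening, so both
 * mappings dispatch to the same underlying region.
 */
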
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask + 1 ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}

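/*
 * An IOMMU implementation publishes a changed translation roughly like
 * this (sketch; the values are illustrative):
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(hwaddr)0xfff,
 *         .translated_addr = pa & ~(hwaddr)0xfff,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_RW,
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 *
 * memory_region_notify_one() then filters on each notifier's registered
 * [start, end] window and its MAP/UNMAP interest before calling it.
 */
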
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

a8170e5e
AK
1775bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1776 hwaddr size, unsigned client)
093bc2cd 1777{
8e41fb63
FZ
1778 assert(mr->ram_block);
1779 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1780 size, client);
093bc2cd
AK
1781}
1782
a8170e5e
AK
1783void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1784 hwaddr size)
093bc2cd 1785{
8e41fb63
FZ
1786 assert(mr->ram_block);
1787 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1788 size,
58d2707e 1789 memory_region_get_dirty_log_mask(mr));
093bc2cd
AK
1790}
1791
6c279db8
JQ
1792bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1793 hwaddr size, unsigned client)
1794{
8e41fb63
FZ
1795 assert(mr->ram_block);
1796 return cpu_physical_memory_test_and_clear_dirty(
1797 memory_region_get_ram_addr(mr) + addr, size, client);
6c279db8
JQ
1798}
1799
8deaf12c
GH
1800DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1801 hwaddr addr,
1802 hwaddr size,
1803 unsigned client)
1804{
1805 assert(mr->ram_block);
1806 return cpu_physical_memory_snapshot_and_clear_dirty(
1807 memory_region_get_ram_addr(mr) + addr, size, client);
1808}
1809
1810bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1811 hwaddr addr, hwaddr size)
1812{
1813 assert(mr->ram_block);
1814 return cpu_physical_memory_snapshot_get_dirty(snap,
1815 memory_region_get_ram_addr(mr) + addr, size);
1816}
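/*
 * Sketch of the snapshot API as display code might use it (fb_mr,
 * fb_size, stride and height are hypothetical).  The snapshot is a
 * plain allocation and is released with g_free().
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(fb_mr, snap,
 *                                              y * stride, stride)) {
 *             // redraw scanline y
 *         }
 *     }
 *     g_free(snap);
 */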
6c279db8 1817
093bc2cd
AK
1818void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1819{
0a752eee 1820 MemoryListener *listener;
0d673e36 1821 AddressSpace *as;
0a752eee 1822 FlatView *view;
5a583347
AK
1823 FlatRange *fr;
1824
0a752eee
PB
1825 /* If the same address space has multiple log_sync listeners, we
1826 * visit that address space's FlatView multiple times. But because
1827 * log_sync listeners are rare, it's still cheaper than walking each
1828 * address space once.
1829 */
1830 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1831 if (!listener->log_sync) {
1832 continue;
1833 }
1834 as = listener->address_space;
1835 view = address_space_get_flatview(as);
99e86347 1836 FOR_EACH_FLAT_RANGE(fr, view) {
0d673e36 1837 if (fr->mr == mr) {
0a752eee
PB
1838 MemoryRegionSection mrs = section_from_flat_range(fr, as);
1839 listener->log_sync(listener, &mrs);
0d673e36 1840 }
5a583347 1841 }
856d7245 1842 flatview_unref(view);
5a583347 1843 }
093bc2cd
AK
1844}
1845
1846void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1847{
fb1cd6f9 1848 if (mr->readonly != readonly) {
59023ef4 1849 memory_region_transaction_begin();
fb1cd6f9 1850 mr->readonly = readonly;
22bde714 1851 memory_region_update_pending |= mr->enabled;
59023ef4 1852 memory_region_transaction_commit();
fb1cd6f9 1853 }
093bc2cd
AK
1854}
1855
5f9a5ea1 1856void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
d0a9b5bc 1857{
5f9a5ea1 1858 if (mr->romd_mode != romd_mode) {
59023ef4 1859 memory_region_transaction_begin();
5f9a5ea1 1860 mr->romd_mode = romd_mode;
22bde714 1861 memory_region_update_pending |= mr->enabled;
59023ef4 1862 memory_region_transaction_commit();
d0a9b5bc
AK
1863 }
1864}
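/*
 * Both setters above go through a transaction, so flipping several
 * attributes at once produces a single topology update.  Callers can
 * batch explicitly too (mr_a and mr_b are hypothetical):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_readonly(mr_a, true);
 *     memory_region_rom_device_set_romd(mr_b, false);
 *     memory_region_transaction_commit();   // one flatview rebuild
 */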
1865
a8170e5e
AK
1866void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1867 hwaddr size, unsigned client)
093bc2cd 1868{
8e41fb63
FZ
1869 assert(mr->ram_block);
1870 cpu_physical_memory_test_and_clear_dirty(
1871 memory_region_get_ram_addr(mr) + addr, size, client);
093bc2cd
AK
1872}
1873
a35ba7be
PB
1874int memory_region_get_fd(MemoryRegion *mr)
1875{
4ff87573
PB
1876 int fd;
1877
1878 rcu_read_lock();
1879 while (mr->alias) {
1880 mr = mr->alias;
a35ba7be 1881 }
4ff87573
PB
1882 fd = mr->ram_block->fd;
1883 rcu_read_unlock();
a35ba7be 1884
4ff87573
PB
1885 return fd;
1886}
a35ba7be 1887
093bc2cd
AK
1888void *memory_region_get_ram_ptr(MemoryRegion *mr)
1889{
49b24afc
PB
1890 void *ptr;
1891 uint64_t offset = 0;
093bc2cd 1892
49b24afc
PB
1893 rcu_read_lock();
1894 while (mr->alias) {
1895 offset += mr->alias_offset;
1896 mr = mr->alias;
1897 }
8e41fb63 1898 assert(mr->ram_block);
0878d0e1 1899 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
49b24afc 1900 rcu_read_unlock();
093bc2cd 1901
0878d0e1 1902 return ptr;
093bc2cd
AK
1903}
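/*
 * Sketch: a device that writes guest RAM through the host pointer
 * must mark the bytes dirty itself (ram_mr, off, buf and len are
 * hypothetical):
 *
 *     uint8_t *host = memory_region_get_ram_ptr(ram_mr);
 *
 *     memcpy(host + off, buf, len);
 *     memory_region_set_dirty(ram_mr, off, len);
 */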
1904
07bdaa41
PB
1905MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
1906{
1907 RAMBlock *block;
1908
1909 block = qemu_ram_block_from_host(ptr, false, offset);
1910 if (!block) {
1911 return NULL;
1912 }
1913
1914 return block->mr;
1915}
1916
7ebb2745
FZ
1917ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1918{
1919 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1920}
1921
37d7c084
PB
1922void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1923{
8e41fb63 1924 assert(mr->ram_block);
37d7c084 1925
fa53a0e5 1926 qemu_ram_resize(mr->ram_block, newsize, errp);
37d7c084
PB
1927}
1928
0d673e36 1929static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
093bc2cd 1930{
99e86347 1931 FlatView *view;
093bc2cd
AK
1932 FlatRange *fr;
1933 CoalescedMemoryRange *cmr;
1934 AddrRange tmp;
95d2994a 1935 MemoryRegionSection section;
093bc2cd 1936
856d7245 1937 view = address_space_get_flatview(as);
99e86347 1938 FOR_EACH_FLAT_RANGE(fr, view) {
093bc2cd 1939 if (fr->mr == mr) {
95d2994a 1940 section = (MemoryRegionSection) {
f6790af6 1941 .address_space = as,
95d2994a 1942 .offset_within_address_space = int128_get64(fr->addr.start),
052e87b0 1943 .size = fr->addr.size,
95d2994a
AK
1944 };
1945
9a54635d 1946 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
95d2994a
AK
1947 int128_get64(fr->addr.start),
1948 int128_get64(fr->addr.size));
093bc2cd
AK
1949 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1950 tmp = addrrange_shift(cmr->addr,
08dafab4
AK
1951 int128_sub(fr->addr.start,
1952 int128_make64(fr->offset_in_region)));
093bc2cd
AK
1953 if (!addrrange_intersects(tmp, fr->addr)) {
1954 continue;
1955 }
1956 tmp = addrrange_intersection(tmp, fr->addr);
9a54635d 1957 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
95d2994a
AK
1958 int128_get64(tmp.start),
1959 int128_get64(tmp.size));
093bc2cd
AK
1960 }
1961 }
1962 }
856d7245 1963 flatview_unref(view);
093bc2cd
AK
1964}
1965
0d673e36
AK
1966static void memory_region_update_coalesced_range(MemoryRegion *mr)
1967{
1968 AddressSpace *as;
1969
1970 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1971 memory_region_update_coalesced_range_as(mr, as);
1972 }
1973}
1974
093bc2cd
AK
1975void memory_region_set_coalescing(MemoryRegion *mr)
1976{
1977 memory_region_clear_coalescing(mr);
08dafab4 1978 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
093bc2cd
AK
1979}
1980
1981void memory_region_add_coalescing(MemoryRegion *mr,
a8170e5e 1982 hwaddr offset,
093bc2cd
AK
1983 uint64_t size)
1984{
7267c094 1985 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
093bc2cd 1986
08dafab4 1987 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
093bc2cd
AK
1988 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
1989 memory_region_update_coalesced_range(mr);
d410515e 1990 memory_region_set_flush_coalesced(mr);
093bc2cd
AK
1991}
1992
1993void memory_region_clear_coalescing(MemoryRegion *mr)
1994{
1995 CoalescedMemoryRange *cmr;
ab5b3db5 1996 bool updated = false;
093bc2cd 1997
d410515e
JK
1998 qemu_flush_coalesced_mmio_buffer();
1999 mr->flush_coalesced_mmio = false;
2000
093bc2cd
AK
2001 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2002 cmr = QTAILQ_FIRST(&mr->coalesced);
2003 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
7267c094 2004 g_free(cmr);
ab5b3db5
FZ
2005 updated = true;
2006 }
2007
2008 if (updated) {
2009 memory_region_update_coalesced_range(mr);
093bc2cd 2010 }
093bc2cd
AK
2011}
2012
d410515e
JK
2013void memory_region_set_flush_coalesced(MemoryRegion *mr)
2014{
2015 mr->flush_coalesced_mmio = true;
2016}
2017
2018void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2019{
2020 qemu_flush_coalesced_mmio_buffer();
2021 if (QTAILQ_EMPTY(&mr->coalesced)) {
2022 mr->flush_coalesced_mmio = false;
2023 }
2024}
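/*
 * Sketch: an MMIO region whose writes have no immediate side effects
 * can opt into coalescing so the accelerator batches the exits
 * (mmio_mr and the window offsets are hypothetical):
 *
 *     memory_region_set_coalescing(mmio_mr);              // whole region
 *     memory_region_add_coalescing(mmio_mr, 0x100, 0x40); // or a window
 */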
2025
196ea131
JK
2026void memory_region_set_global_locking(MemoryRegion *mr)
2027{
2028 mr->global_locking = true;
2029}
2030
2031void memory_region_clear_global_locking(MemoryRegion *mr)
2032{
2033 mr->global_locking = false;
2034}
2035
8c56c1a5
PF
2036static bool userspace_eventfd_warning;
2037
3e9d69e7 2038void memory_region_add_eventfd(MemoryRegion *mr,
a8170e5e 2039 hwaddr addr,
3e9d69e7
AK
2040 unsigned size,
2041 bool match_data,
2042 uint64_t data,
753d5e14 2043 EventNotifier *e)
3e9d69e7
AK
2044{
2045 MemoryRegionIoeventfd mrfd = {
08dafab4
AK
2046 .addr.start = int128_make64(addr),
2047 .addr.size = int128_make64(size),
3e9d69e7
AK
2048 .match_data = match_data,
2049 .data = data,
753d5e14 2050 .e = e,
3e9d69e7
AK
2051 };
2052 unsigned i;
2053
8c56c1a5
PF
2054 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2055 userspace_eventfd_warning))) {
2056 userspace_eventfd_warning = true;
2057 error_report("Using eventfd without MMIO binding in KVM. "
2058 "Suboptimal performance expected");
2059 }
2060
b8aecea2
JW
2061 if (size) {
2062 adjust_endianness(mr, &mrfd.data, size);
2063 }
59023ef4 2064 memory_region_transaction_begin();
3e9d69e7
AK
2065 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2066 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2067 break;
2068 }
2069 }
2070 ++mr->ioeventfd_nb;
7267c094 2071 mr->ioeventfds = g_realloc(mr->ioeventfds,
3e9d69e7
AK
2072 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2073 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2074 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2075 mr->ioeventfds[i] = mrfd;
4dc56152 2076 ioeventfd_update_pending |= mr->enabled;
59023ef4 2077 memory_region_transaction_commit();
3e9d69e7
AK
2078}
2079
2080void memory_region_del_eventfd(MemoryRegion *mr,
a8170e5e 2081 hwaddr addr,
3e9d69e7
AK
2082 unsigned size,
2083 bool match_data,
2084 uint64_t data,
753d5e14 2085 EventNotifier *e)
3e9d69e7
AK
2086{
2087 MemoryRegionIoeventfd mrfd = {
08dafab4
AK
2088 .addr.start = int128_make64(addr),
2089 .addr.size = int128_make64(size),
3e9d69e7
AK
2090 .match_data = match_data,
2091 .data = data,
753d5e14 2092 .e = e,
3e9d69e7
AK
2093 };
2094 unsigned i;
2095
b8aecea2
JW
2096 if (size) {
2097 adjust_endianness(mr, &mrfd.data, size);
2098 }
59023ef4 2099 memory_region_transaction_begin();
3e9d69e7
AK
2100 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2101 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2102 break;
2103 }
2104 }
2105 assert(i != mr->ioeventfd_nb);
2106 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2107 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2108 --mr->ioeventfd_nb;
7267c094 2109 mr->ioeventfds = g_realloc(mr->ioeventfds,
3e9d69e7 2110 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
4dc56152 2111 ioeventfd_update_pending |= mr->enabled;
59023ef4 2112 memory_region_transaction_commit();
3e9d69e7
AK
2113}
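/*
 * Sketch of the ioeventfd pattern (virtio-style doorbell;
 * NOTIFY_OFFSET and vq_idx are hypothetical): a 2-byte write of
 * vq_idx at the notify register wakes the EventNotifier in the
 * kernel, bypassing the slow MMIO dispatch path.
 *
 *     EventNotifier e;
 *
 *     event_notifier_init(&e, 0);
 *     memory_region_add_eventfd(mmio_mr, NOTIFY_OFFSET, 2,
 *                               true, vq_idx, &e);
 *     ...
 *     memory_region_del_eventfd(mmio_mr, NOTIFY_OFFSET, 2,
 *                               true, vq_idx, &e);
 */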
2114
feca4ac1 2115static void memory_region_update_container_subregions(MemoryRegion *subregion)
093bc2cd 2116{
feca4ac1 2117 MemoryRegion *mr = subregion->container;
093bc2cd
AK
2118 MemoryRegion *other;
2119
59023ef4
JK
2120 memory_region_transaction_begin();
2121
dfde4e6e 2122 memory_region_ref(subregion);
093bc2cd
AK
2123 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2124 if (subregion->priority >= other->priority) {
2125 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2126 goto done;
2127 }
2128 }
2129 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2130done:
22bde714 2131 memory_region_update_pending |= mr->enabled && subregion->enabled;
59023ef4 2132 memory_region_transaction_commit();
093bc2cd
AK
2133}
2134
0598701a
PC
2135static void memory_region_add_subregion_common(MemoryRegion *mr,
2136 hwaddr offset,
2137 MemoryRegion *subregion)
2138{
feca4ac1
PB
2139 assert(!subregion->container);
2140 subregion->container = mr;
0598701a 2141 subregion->addr = offset;
feca4ac1 2142 memory_region_update_container_subregions(subregion);
0598701a 2143}
093bc2cd
AK
2144
2145void memory_region_add_subregion(MemoryRegion *mr,
a8170e5e 2146 hwaddr offset,
093bc2cd
AK
2147 MemoryRegion *subregion)
2148{
093bc2cd
AK
2149 subregion->priority = 0;
2150 memory_region_add_subregion_common(mr, offset, subregion);
2151}
2152
2153void memory_region_add_subregion_overlap(MemoryRegion *mr,
a8170e5e 2154 hwaddr offset,
093bc2cd 2155 MemoryRegion *subregion,
a1ff8ae0 2156 int priority)
093bc2cd 2157{
093bc2cd
AK
2158 subregion->priority = priority;
2159 memory_region_add_subregion_common(mr, offset, subregion);
2160}
2161
2162void memory_region_del_subregion(MemoryRegion *mr,
2163 MemoryRegion *subregion)
2164{
59023ef4 2165 memory_region_transaction_begin();
feca4ac1
PB
2166 assert(subregion->container == mr);
2167 subregion->container = NULL;
093bc2cd 2168 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
dfde4e6e 2169 memory_region_unref(subregion);
22bde714 2170 memory_region_update_pending |= mr->enabled && subregion->enabled;
59023ef4 2171 memory_region_transaction_commit();
6bba19ba
AK
2172}
2173
2174void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2175{
2176 if (enabled == mr->enabled) {
2177 return;
2178 }
59023ef4 2179 memory_region_transaction_begin();
6bba19ba 2180 mr->enabled = enabled;
22bde714 2181 memory_region_update_pending = true;
59023ef4 2182 memory_region_transaction_commit();
093bc2cd 2183}
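/*
 * Sketch: composing a hierarchy with the calls above (sysmem, ram_mr
 * and vga_mr are hypothetical).  The overlap variant lets a higher
 * priority region shadow part of a lower priority one:
 *
 *     memory_region_add_subregion(sysmem, 0, ram_mr);
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000, vga_mr, 1);
 *     memory_region_set_enabled(vga_mr, false);   // temporarily unmap
 */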
1c0ffa58 2184
e7af4c67
MT
2185void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2186{
2187 Int128 s = int128_make64(size);
2188
2189 if (size == UINT64_MAX) {
2190 s = int128_2_64();
2191 }
2192 if (int128_eq(s, mr->size)) {
2193 return;
2194 }
2195 memory_region_transaction_begin();
2196 mr->size = s;
2197 memory_region_update_pending = true;
2198 memory_region_transaction_commit();
2199}
2200
67891b8a 2201static void memory_region_readd_subregion(MemoryRegion *mr)
2282e1af 2202{
feca4ac1 2203 MemoryRegion *container = mr->container;
2282e1af 2204
feca4ac1 2205 if (container) {
67891b8a
PC
2206 memory_region_transaction_begin();
2207 memory_region_ref(mr);
feca4ac1
PB
2208 memory_region_del_subregion(container, mr);
2209 mr->container = container;
2210 memory_region_update_container_subregions(mr);
67891b8a
PC
2211 memory_region_unref(mr);
2212 memory_region_transaction_commit();
2282e1af 2213 }
67891b8a 2214}
2282e1af 2215
67891b8a
PC
2216void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2217{
2218 if (addr != mr->addr) {
2219 mr->addr = addr;
2220 memory_region_readd_subregion(mr);
2221 }
2282e1af
AK
2222}
2223
a8170e5e 2224void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
4703359e 2225{
4703359e 2226 assert(mr->alias);
4703359e 2227
59023ef4 2228 if (offset == mr->alias_offset) {
4703359e
AK
2229 return;
2230 }
2231
59023ef4
JK
2232 memory_region_transaction_begin();
2233 mr->alias_offset = offset;
22bde714 2234 memory_region_update_pending |= mr->enabled;
59023ef4 2235 memory_region_transaction_commit();
4703359e
AK
2236}
2237
a2b257d6
IM
2238uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2239{
2240 return mr->align;
2241}
2242
e2177955
AK
2243static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2244{
2245 const AddrRange *addr = addr_;
2246 const FlatRange *fr = fr_;
2247
2248 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2249 return -1;
2250 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2251 return 1;
2252 }
2253 return 0;
2254}
2255
99e86347 2256static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
e2177955 2257{
99e86347 2258 return bsearch(&addr, view->ranges, view->nr,
e2177955
AK
2259 sizeof(FlatRange), cmp_flatrange_addr);
2260}
2261
eed2bacf
IM
2262bool memory_region_is_mapped(MemoryRegion *mr)
2263{
2264 return mr->container ? true : false;
2265}
2266
c6742b14
PB
2267/* Same as memory_region_find, but it does not add a reference to the
2268 * returned region. It must be called from an RCU critical section.
2269 */
2270static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2271 hwaddr addr, uint64_t size)
e2177955 2272{
052e87b0 2273 MemoryRegionSection ret = { .mr = NULL };
73034e9e
PB
2274 MemoryRegion *root;
2275 AddressSpace *as;
2276 AddrRange range;
99e86347 2277 FlatView *view;
73034e9e
PB
2278 FlatRange *fr;
2279
2280 addr += mr->addr;
feca4ac1
PB
2281 for (root = mr; root->container; ) {
2282 root = root->container;
73034e9e
PB
2283 addr += root->addr;
2284 }
e2177955 2285
73034e9e 2286 as = memory_region_to_address_space(root);
eed2bacf
IM
2287 if (!as) {
2288 return ret;
2289 }
73034e9e 2290 range = addrrange_make(int128_make64(addr), int128_make64(size));
99e86347 2291
2b647668 2292 view = atomic_rcu_read(&as->current_map);
99e86347 2293 fr = flatview_lookup(view, range);
e2177955 2294 if (!fr) {
c6742b14 2295 return ret;
e2177955
AK
2296 }
2297
99e86347 2298 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
e2177955
AK
2299 --fr;
2300 }
2301
2302 ret.mr = fr->mr;
73034e9e 2303 ret.address_space = as;
e2177955
AK
2304 range = addrrange_intersection(range, fr->addr);
2305 ret.offset_within_region = fr->offset_in_region;
2306 ret.offset_within_region += int128_get64(int128_sub(range.start,
2307 fr->addr.start));
052e87b0 2308 ret.size = range.size;
e2177955 2309 ret.offset_within_address_space = int128_get64(range.start);
7a8499e8 2310 ret.readonly = fr->readonly;
c6742b14
PB
2311 return ret;
2312}
2313
2314MemoryRegionSection memory_region_find(MemoryRegion *mr,
2315 hwaddr addr, uint64_t size)
2316{
2317 MemoryRegionSection ret;
2318 rcu_read_lock();
2319 ret = memory_region_find_rcu(mr, addr, size);
2320 if (ret.mr) {
2321 memory_region_ref(ret.mr);
2322 }
2b647668 2323 rcu_read_unlock();
e2177955
AK
2324 return ret;
2325}
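/*
 * Sketch: callers of memory_region_find() own a reference on the
 * returned region and must drop it when done (sysmem is hypothetical):
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *
 *     if (sec.mr) {
 *         // use sec.offset_within_region, int128_get64(sec.size), ...
 *         memory_region_unref(sec.mr);
 *     }
 */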
2326
c6742b14
PB
2327bool memory_region_present(MemoryRegion *container, hwaddr addr)
2328{
2329 MemoryRegion *mr;
2330
2331 rcu_read_lock();
2332 mr = memory_region_find_rcu(container, addr, 1).mr;
2333 rcu_read_unlock();
2334 return mr && mr != container;
2335}
2336
9c1f8f44 2337void memory_global_dirty_log_sync(void)
86e775c6 2338{
9c1f8f44
PB
2339 MemoryListener *listener;
2340 AddressSpace *as;
99e86347 2341 FlatView *view;
7664e80c
AK
2342 FlatRange *fr;
2343
9c1f8f44
PB
2344 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2345 if (!listener->log_sync) {
2346 continue;
2347 }
d45fa784 2348 as = listener->address_space;
9c1f8f44
PB
2349 view = address_space_get_flatview(as);
2350 FOR_EACH_FLAT_RANGE(fr, view) {
adaad61c
PB
2351 if (fr->dirty_log_mask) {
2352 MemoryRegionSection mrs = section_from_flat_range(fr, as);
2353 listener->log_sync(listener, &mrs);
2354 }
9c1f8f44
PB
2355 }
2356 flatview_unref(view);
7664e80c
AK
2357 }
2358}
2359
2360void memory_global_dirty_log_start(void)
2361{
7664e80c 2362 global_dirty_log = true;
6f6a5ef3 2363
7376e582 2364 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
6f6a5ef3
PB
2365
2366 /* Refresh DIRTY_LOG_MIGRATION bit. */
2367 memory_region_transaction_begin();
2368 memory_region_update_pending = true;
2369 memory_region_transaction_commit();
7664e80c
AK
2370}
2371
2372void memory_global_dirty_log_stop(void)
2373{
7664e80c 2374 global_dirty_log = false;
6f6a5ef3
PB
2375
2376 /* Refresh DIRTY_LOG_MIGRATION bit. */
2377 memory_region_transaction_begin();
2378 memory_region_update_pending = true;
2379 memory_region_transaction_commit();
2380
7376e582 2381 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
7664e80c
AK
2382}
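/*
 * Sketch of a migration-style dirty-log cycle built from the calls
 * above (ram_mr, off and len are hypothetical):
 *
 *     memory_global_dirty_log_start();
 *     while (migrating) {
 *         memory_global_dirty_log_sync();
 *         if (memory_region_test_and_clear_dirty(ram_mr, off, len,
 *                                                DIRTY_MEMORY_MIGRATION)) {
 *             // resend the pages in [off, off + len)
 *         }
 *     }
 *     memory_global_dirty_log_stop();
 */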
2383
2384static void listener_add_address_space(MemoryListener *listener,
2385 AddressSpace *as)
2386{
99e86347 2387 FlatView *view;
7664e80c
AK
2388 FlatRange *fr;
2389
680a4783
PB
2390 if (listener->begin) {
2391 listener->begin(listener);
2392 }
7664e80c 2393 if (global_dirty_log) {
975aefe0
AK
2394 if (listener->log_global_start) {
2395 listener->log_global_start(listener);
2396 }
7664e80c 2397 }
975aefe0 2398
856d7245 2399 view = address_space_get_flatview(as);
99e86347 2400 FOR_EACH_FLAT_RANGE(fr, view) {
7664e80c
AK
2401 MemoryRegionSection section = {
2402 .mr = fr->mr,
f6790af6 2403 .address_space = as,
7664e80c 2404 .offset_within_region = fr->offset_in_region,
052e87b0 2405 .size = fr->addr.size,
7664e80c 2406 .offset_within_address_space = int128_get64(fr->addr.start),
7a8499e8 2407 .readonly = fr->readonly,
7664e80c 2408 };
680a4783
PB
2409 if (fr->dirty_log_mask && listener->log_start) {
2410 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2411 }
975aefe0
AK
2412 if (listener->region_add) {
2413 listener->region_add(listener, &section);
2414 }
7664e80c 2415 }
680a4783
PB
2416 if (listener->commit) {
2417 listener->commit(listener);
2418 }
856d7245 2419 flatview_unref(view);
7664e80c
AK
2420}
2421
d45fa784 2422void memory_listener_register(MemoryListener *listener, AddressSpace *as)
7664e80c 2423{
72e22d2f
AK
2424 MemoryListener *other = NULL;
2425
d45fa784 2426 listener->address_space = as;
72e22d2f
AK
2427 if (QTAILQ_EMPTY(&memory_listeners)
2428 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2429 memory_listeners)->priority) {
2430 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2431 } else {
2432 QTAILQ_FOREACH(other, &memory_listeners, link) {
2433 if (listener->priority < other->priority) {
2434 break;
2435 }
2436 }
2437 QTAILQ_INSERT_BEFORE(other, listener, link);
2438 }
0d673e36 2439
9a54635d
PB
2440 if (QTAILQ_EMPTY(&as->listeners)
2441 || listener->priority >= QTAILQ_LAST(&as->listeners,
2442 memory_listeners)->priority) {
2443 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2444 } else {
2445 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2446 if (listener->priority < other->priority) {
2447 break;
2448 }
2449 }
2450 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2451 }
2452
d45fa784 2453 listener_add_address_space(listener, as);
7664e80c
AK
2454}
2455
2456void memory_listener_unregister(MemoryListener *listener)
2457{
1d8280c1
PB
2458 if (!listener->address_space) {
2459 return;
2460 }
2461
72e22d2f 2462 QTAILQ_REMOVE(&memory_listeners, listener, link);
9a54635d 2463 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
1d8280c1 2464 listener->address_space = NULL;
86e775c6 2465}
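/*
 * Sketch: a minimal listener on one address space (callback and
 * priority are hypothetical).  As implemented above, registration
 * immediately replays the current topology through region_add.
 *
 *     static void my_region_add(MemoryListener *l,
 *                               MemoryRegionSection *s)
 *     {
 *         // record [s->offset_within_address_space,
 *         //         s->offset_within_address_space
 *         //         + int128_get64(s->size))
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */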
e2177955 2466
c9356746
FK
2467bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2468{
2469 void *host;
2470 unsigned size = 0;
2471 unsigned offset = 0;
2472 Object *new_interface;
2473
2474 if (!mr || !mr->ops->request_ptr) {
2475 return false;
2476 }
2477
2478 /*
2479 * Avoid an update if the request_ptr callback itself calls
2480 * memory_region_invalidate_mmio_ptr(), which is likely when a
2481 * cache is in use.
2482 */
2483 memory_region_transaction_begin();
2484
2485 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2486
2487 if (!host || !size) {
2488 memory_region_transaction_commit();
2489 return false;
2490 }
2491
2492 new_interface = object_new("mmio_interface");
2493 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2494 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2495 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2496 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2497 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2498 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2499
2500 memory_region_transaction_commit();
2501 return true;
2502}
2503
2504typedef struct MMIOPtrInvalidate {
2505 MemoryRegion *mr;
2506 hwaddr offset;
2507 unsigned size;
2508 int busy;
2509 int allocated;
2510} MMIOPtrInvalidate;
2511
2512#define MAX_MMIO_INVALIDATE 10
2513static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2514
2515static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2516 run_on_cpu_data data)
2517{
2518 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2519 MemoryRegion *mr = invalidate_data->mr;
2520 hwaddr offset = invalidate_data->offset;
2521 unsigned size = invalidate_data->size;
2522 MemoryRegionSection section = memory_region_find(mr, offset, size);
2523
2524 qemu_mutex_lock_iothread();
2525
2526 /* Reset dirty so this doesn't happen later. */
2527 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2528
2529 if (section.mr != mr) {
2530 /* memory_region_find took a ref on section.mr; drop it */
2531 memory_region_unref(section.mr);
2532 if (MMIO_INTERFACE(section.mr->owner)) {
2533 /* We found the mmio_interface; just drop it. */
2534 object_property_set_bool(section.mr->owner, false, "realized",
2535 NULL);
2536 object_unref(section.mr->owner);
2537 object_unparent(section.mr->owner);
2538 }
2539 }
2540
2541 qemu_mutex_unlock_iothread();
2542
2543 if (invalidate_data->allocated) {
2544 g_free(invalidate_data);
2545 } else {
2546 invalidate_data->busy = 0;
2547 }
2548}
2549
2550void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2551 unsigned size)
2552{
2553 size_t i;
2554 MMIOPtrInvalidate *invalidate_data = NULL;
2555
2556 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2557 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2558 invalidate_data = &mmio_ptr_invalidate_list[i];
2559 break;
2560 }
2561 }
2562
2563 if (!invalidate_data) {
2564 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2565 invalidate_data->allocated = 1;
2566 }
2567
2568 invalidate_data->mr = mr;
2569 invalidate_data->offset = offset;
2570 invalidate_data->size = size;
2571
2572 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2573 RUN_ON_CPU_HOST_PTR(invalidate_data));
2574}
2575
7dca8043 2576void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
1c0ffa58 2577{
ac95190e 2578 memory_region_ref(root);
59023ef4 2579 memory_region_transaction_begin();
f0c02d15 2580 as->ref_count = 1;
8786db7c 2581 as->root = root;
f0c02d15 2582 as->malloced = false;
8786db7c
AK
2583 as->current_map = g_new(FlatView, 1);
2584 flatview_init(as->current_map);
4c19eb72
AK
2585 as->ioeventfd_nb = 0;
2586 as->ioeventfds = NULL;
9a54635d 2587 QTAILQ_INIT(&as->listeners);
0d673e36 2588 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
7dca8043 2589 as->name = g_strdup(name ? name : "anonymous");
ac1970fb 2590 address_space_init_dispatch(as);
f43793c7
PB
2591 memory_region_update_pending |= root->enabled;
2592 memory_region_transaction_commit();
1c0ffa58 2593}
658b2224 2594
374f2981 2595static void do_address_space_destroy(AddressSpace *as)
83f3c251 2596{
f0c02d15 2597 bool do_free = as->malloced;
078c44f4 2598
83f3c251 2599 address_space_destroy_dispatch(as);
9a54635d 2600 assert(QTAILQ_EMPTY(&as->listeners));
078c44f4 2601
856d7245 2602 flatview_unref(as->current_map);
7dca8043 2603 g_free(as->name);
4c19eb72 2604 g_free(as->ioeventfds);
ac95190e 2605 memory_region_unref(as->root);
f0c02d15
PC
2606 if (do_free) {
2607 g_free(as);
2608 }
2609}
2610
2611AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
2612{
2613 AddressSpace *as;
2614
2615 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2616 if (root == as->root && as->malloced) {
2617 as->ref_count++;
2618 return as;
2619 }
2620 }
2621
2622 as = g_malloc0(sizeof *as);
2623 address_space_init(as, root, name);
2624 as->malloced = true;
2625 return as;
83f3c251
AK
2626}
2627
374f2981
PB
2628void address_space_destroy(AddressSpace *as)
2629{
ac95190e
PB
2630 MemoryRegion *root = as->root;
2631
f0c02d15
PC
2632 as->ref_count--;
2633 if (as->ref_count) {
2634 return;
2635 }
374f2981
PB
2636 /* Flush out anything from MemoryListeners listening in on this */
2637 memory_region_transaction_begin();
2638 as->root = NULL;
2639 memory_region_transaction_commit();
2640 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
6e48e8f9 2641 address_space_unregister(as);
374f2981
PB
2642
2643 /* At this point, as->dispatch and as->current_map are dummy
2644 * entries that the guest should never use. Wait for the old
2645 * values to expire before freeing the data.
2646 */
ac95190e 2647 as->root = root;
374f2981
PB
2648 call_rcu(as, do_address_space_destroy, rcu);
2649}
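/*
 * Sketch: a device-private address space over its own root region
 * (root_mr and the name are hypothetical); destruction is deferred
 * through RCU as above:
 *
 *     AddressSpace as;
 *
 *     address_space_init(&as, root_mr, "my-device-dma");
 *     ...
 *     address_space_destroy(&as);
 */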
2650
4e831901
PX
2651static const char *memory_region_type(MemoryRegion *mr)
2652{
2653 if (memory_region_is_ram_device(mr)) {
2654 return "ramd";
2655 } else if (memory_region_is_romd(mr)) {
2656 return "romd";
2657 } else if (memory_region_is_rom(mr)) {
2658 return "rom";
2659 } else if (memory_region_is_ram(mr)) {
2660 return "ram";
2661 } else {
2662 return "i/o";
2663 }
2664}
2665
314e2987
BS
2666typedef struct MemoryRegionList MemoryRegionList;
2667
2668struct MemoryRegionList {
2669 const MemoryRegion *mr;
314e2987
BS
2670 QTAILQ_ENTRY(MemoryRegionList) queue;
2671};
2672
2673typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
2674
4e831901
PX
2675#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2676 int128_sub((size), int128_one())) : 0)
2677#define MTREE_INDENT " "
2678
314e2987
BS
2679static void mtree_print_mr(fprintf_function mon_printf, void *f,
2680 const MemoryRegion *mr, unsigned int level,
a8170e5e 2681 hwaddr base,
9479c57a 2682 MemoryRegionListHead *alias_print_queue)
314e2987 2683{
9479c57a
JK
2684 MemoryRegionList *new_ml, *ml, *next_ml;
2685 MemoryRegionListHead submr_print_queue;
314e2987
BS
2686 const MemoryRegion *submr;
2687 unsigned int i;
b31f8412 2688 hwaddr cur_start, cur_end;
314e2987 2689
f8a9f720 2690 if (!mr) {
314e2987
BS
2691 return;
2692 }
2693
2694 for (i = 0; i < level; i++) {
4e831901 2695 mon_printf(f, MTREE_INDENT);
314e2987
BS
2696 }
2697
b31f8412
PX
2698 cur_start = base + mr->addr;
2699 cur_end = cur_start + MR_SIZE(mr->size);
2700
2701 /*
2702 * Try to detect overflow of the memory region's address range.
2703 * This should never happen normally; when it does, print a
2704 * marker to warn whoever is reading the tree.
2705 */
2706 if (cur_start < base || cur_end < cur_start) {
2707 mon_printf(f, "[DETECTED OVERFLOW!] ");
2708 }
2709
314e2987
BS
2710 if (mr->alias) {
2711 MemoryRegionList *ml;
2712 bool found = false;
2713
2714 /* check if the alias is already in the queue */
9479c57a 2715 QTAILQ_FOREACH(ml, alias_print_queue, queue) {
f54bb15f 2716 if (ml->mr == mr->alias) {
314e2987
BS
2717 found = true;
2718 }
2719 }
2720
2721 if (!found) {
2722 ml = g_new(MemoryRegionList, 1);
2723 ml->mr = mr->alias;
9479c57a 2724 QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
314e2987 2725 }
4896d74b 2726 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
4e831901 2727 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
f8a9f720 2728 "-" TARGET_FMT_plx "%s\n",
b31f8412 2729 cur_start, cur_end,
4b474ba7 2730 mr->priority,
4e831901 2731 memory_region_type((MemoryRegion *)mr),
3fb18b4d
PC
2732 memory_region_name(mr),
2733 memory_region_name(mr->alias),
314e2987 2734 mr->alias_offset,
4e831901 2735 mr->alias_offset + MR_SIZE(mr->size),
f8a9f720 2736 mr->enabled ? "" : " [disabled]");
314e2987 2737 } else {
4896d74b 2738 mon_printf(f,
4e831901 2739 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
b31f8412 2740 cur_start, cur_end,
4b474ba7 2741 mr->priority,
4e831901 2742 memory_region_type((MemoryRegion *)mr),
f8a9f720
GH
2743 memory_region_name(mr),
2744 mr->enabled ? "" : " [disabled]");
314e2987 2745 }
9479c57a
JK
2746
2747 QTAILQ_INIT(&submr_print_queue);
2748
314e2987 2749 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
9479c57a
JK
2750 new_ml = g_new(MemoryRegionList, 1);
2751 new_ml->mr = submr;
2752 QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
2753 if (new_ml->mr->addr < ml->mr->addr ||
2754 (new_ml->mr->addr == ml->mr->addr &&
2755 new_ml->mr->priority > ml->mr->priority)) {
2756 QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
2757 new_ml = NULL;
2758 break;
2759 }
2760 }
2761 if (new_ml) {
2762 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
2763 }
2764 }
2765
2766 QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
b31f8412 2767 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
9479c57a
JK
2768 alias_print_queue);
2769 }
2770
88365e47 2771 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
9479c57a 2772 g_free(ml);
314e2987
BS
2773 }
2774}
2775
57bb40c9
PX
2776static void mtree_print_flatview(fprintf_function p, void *f,
2777 AddressSpace *as)
2778{
2779 FlatView *view = address_space_get_flatview(as);
2780 FlatRange *range = &view->ranges[0];
2781 MemoryRegion *mr;
2782 int n = view->nr;
2783
2784 if (n <= 0) {
2785 p(f, MTREE_INDENT "No rendered FlatView for "
2786 "address space '%s'\n", as->name);
2787 flatview_unref(view);
2788 return;
2789 }
2790
2791 while (n--) {
2792 mr = range->mr;
377a07aa
PB
2793 if (range->offset_in_region) {
2794 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2795 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2796 int128_get64(range->addr.start),
2797 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2798 mr->priority,
2799 range->readonly ? "rom" : memory_region_type(mr),
2800 memory_region_name(mr),
2801 range->offset_in_region);
2802 } else {
2803 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2804 TARGET_FMT_plx " (prio %d, %s): %s\n",
2805 int128_get64(range->addr.start),
2806 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2807 mr->priority,
2808 range->readonly ? "rom" : memory_region_type(mr),
2809 memory_region_name(mr));
2810 }
57bb40c9
PX
2811 range++;
2812 }
2813
2814 flatview_unref(view);
2815}
2816
2817void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
314e2987
BS
2818{
2819 MemoryRegionListHead ml_head;
2820 MemoryRegionList *ml, *ml2;
0d673e36 2821 AddressSpace *as;
314e2987 2822
57bb40c9
PX
2823 if (flatview) {
2824 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2825 mon_printf(f, "address-space (flat view): %s\n", as->name);
2826 mtree_print_flatview(mon_printf, f, as);
2827 mon_printf(f, "\n");
2828 }
2829 return;
2830 }
2831
314e2987
BS
2832 QTAILQ_INIT(&ml_head);
2833
0d673e36 2834 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
e48816aa
GH
2835 mon_printf(f, "address-space: %s\n", as->name);
2836 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
2837 mon_printf(f, "\n");
b9f9be88
BS
2838 }
2839
314e2987
BS
2840 /* print aliased regions */
2841 QTAILQ_FOREACH(ml, &ml_head, queue) {
e48816aa
GH
2842 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
2843 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
2844 mon_printf(f, "\n");
314e2987
BS
2845 }
2846
2847 QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
88365e47 2848 g_free(ml);
314e2987 2849 }
314e2987 2850}
b4fefef9 2851
b08199c6
PM
2852void memory_region_init_ram(MemoryRegion *mr,
2853 struct Object *owner,
2854 const char *name,
2855 uint64_t size,
2856 Error **errp)
2857{
2858 DeviceState *owner_dev;
2859 Error *err = NULL;
2860
2861 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
2862 if (err) {
2863 error_propagate(errp, err);
2864 return;
2865 }
2866 /* This will assert if owner is neither NULL nor a DeviceState.
2867 * We only want the owner here for the purposes of defining a
2868 * unique name for migration. TODO: Ideally we should implement
2869 * a naming scheme for Objects which are not DeviceStates, in
2870 * which case we can relax this restriction.
2871 */
2872 owner_dev = DEVICE(owner);
2873 vmstate_register_ram(mr, owner_dev);
2874}
2875
2876void memory_region_init_rom(MemoryRegion *mr,
2877 struct Object *owner,
2878 const char *name,
2879 uint64_t size,
2880 Error **errp)
2881{
2882 DeviceState *owner_dev;
2883 Error *err = NULL;
2884
2885 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
2886 if (err) {
2887 error_propagate(errp, err);
2888 return;
2889 }
2890 /* This will assert if owner is neither NULL nor a DeviceState.
2891 * We only want the owner here for the purposes of defining a
2892 * unique name for migration. TODO: Ideally we should implement
2893 * a naming scheme for Objects which are not DeviceStates, in
2894 * which case we can relax this restriction.
2895 */
2896 owner_dev = DEVICE(owner);
2897 vmstate_register_ram(mr, owner_dev);
2898}
2899
2900void memory_region_init_rom_device(MemoryRegion *mr,
2901 struct Object *owner,
2902 const MemoryRegionOps *ops,
2903 void *opaque,
2904 const char *name,
2905 uint64_t size,
2906 Error **errp)
2907{
2908 DeviceState *owner_dev;
2909 Error *err = NULL;
2910
2911 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
2912 name, size, &err);
2913 if (err) {
2914 error_propagate(errp, err);
2915 return;
2916 }
2917 /* This will assert if owner is neither NULL nor a DeviceState.
2918 * We only want the owner here for the purposes of defining a
2919 * unique name for migration. TODO: Ideally we should implement
2920 * a naming scheme for Objects which are not DeviceStates, in
2921 * which case we can relax this restriction.
2922 */
2923 owner_dev = DEVICE(owner);
2924 vmstate_register_ram(mr, owner_dev);
2925}
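/*
 * Sketch: the migration-aware wrappers above are what board code
 * normally calls (s, dev, sysmem and ram_size are hypothetical):
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "board.ram",
 *                            ram_size, &error_fatal);
 *     memory_region_add_subregion(sysmem, 0, &s->ram);
 */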
2926
b4fefef9
PC
2927static const TypeInfo memory_region_info = {
2928 .parent = TYPE_OBJECT,
2929 .name = TYPE_MEMORY_REGION,
2930 .instance_size = sizeof(MemoryRegion),
2931 .instance_init = memory_region_initfn,
2932 .instance_finalize = memory_region_finalize,
2933};
2934
3df9d748
AK
2935static const TypeInfo iommu_memory_region_info = {
2936 .parent = TYPE_MEMORY_REGION,
2937 .name = TYPE_IOMMU_MEMORY_REGION,
1221a474 2938 .class_size = sizeof(IOMMUMemoryRegionClass),
3df9d748
AK
2939 .instance_size = sizeof(IOMMUMemoryRegion),
2940 .instance_init = iommu_memory_region_initfn,
1221a474 2941 .abstract = true,
3df9d748
AK
2942};
2943
b4fefef9
PC
2944static void memory_register_types(void)
2945{
2946 type_register_static(&memory_region_info);
3df9d748 2947 type_register_static(&iommu_memory_region_info);
b4fefef9
PC
2948}
2949
2950type_init(memory_register_types)