1 /*
2 * Physical memory management
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/log.h"
18 #include "qapi/error.h"
19 #include "exec/memory.h"
20 #include "qapi/visitor.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/main-loop.h"
24 #include "qemu/qemu-print.h"
25 #include "qom/object.h"
26 #include "trace.h"
27
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "sysemu/kvm.h"
31 #include "sysemu/runstate.h"
32 #include "sysemu/tcg.h"
33 #include "qemu/accel.h"
34 #include "hw/boards.h"
35 #include "migration/vmstate.h"
36 #include "exec/address-spaces.h"
37
38 //#define DEBUG_UNASSIGNED
39
40 static unsigned memory_region_transaction_depth;
41 static bool memory_region_update_pending;
42 static bool ioeventfd_update_pending;
43 unsigned int global_dirty_tracking;
44
45 static QTAILQ_HEAD(, MemoryListener) memory_listeners
46 = QTAILQ_HEAD_INITIALIZER(memory_listeners);
47
48 static QTAILQ_HEAD(, AddressSpace) address_spaces
49 = QTAILQ_HEAD_INITIALIZER(address_spaces);
50
51 static GHashTable *flat_views;
52
53 typedef struct AddrRange AddrRange;
54
55 /*
56 * Note that signed integers are needed for negative offsetting in aliases
57 * (large MemoryRegion::alias_offset).
58 */
59 struct AddrRange {
60 Int128 start;
61 Int128 size;
62 };
63
64 static AddrRange addrrange_make(Int128 start, Int128 size)
65 {
66 return (AddrRange) { start, size };
67 }
68
69 static bool addrrange_equal(AddrRange r1, AddrRange r2)
70 {
71 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
72 }
73
74 static Int128 addrrange_end(AddrRange r)
75 {
76 return int128_add(r.start, r.size);
77 }
78
79 static AddrRange addrrange_shift(AddrRange range, Int128 delta)
80 {
81 int128_addto(&range.start, delta);
82 return range;
83 }
84
85 static bool addrrange_contains(AddrRange range, Int128 addr)
86 {
87 return int128_ge(addr, range.start)
88 && int128_lt(addr, addrrange_end(range));
89 }
90
91 static bool addrrange_intersects(AddrRange r1, AddrRange r2)
92 {
93 return addrrange_contains(r1, r2.start)
94 || addrrange_contains(r2, r1.start);
95 }
96
97 static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
98 {
99 Int128 start = int128_max(r1.start, r2.start);
100 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
101 return addrrange_make(start, int128_sub(end, start));
102 }
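
/*
 * Illustrative note (not part of the original code): for two overlapping
 * ranges the helpers above behave as expected, e.g. the intersection of
 * [0x1000, 0x1000+0x3000) and [0x2000, 0x2000+0x4000) is
 * [0x2000, 0x2000+0x2000):
 *
 *   AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x3000));
 *   AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x4000));
 *   AddrRange i = addrrange_intersection(a, b);
 *   // i.start == 0x2000, i.size == 0x2000
 *
 * addrrange_intersection() assumes its arguments already intersect;
 * callers check with addrrange_intersects() first.
 */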
103
104 enum ListenerDirection { Forward, Reverse };
105
106 #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
107 do { \
108 MemoryListener *_listener; \
109 \
110 switch (_direction) { \
111 case Forward: \
112 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
113 if (_listener->_callback) { \
114 _listener->_callback(_listener, ##_args); \
115 } \
116 } \
117 break; \
118 case Reverse: \
119 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
120 if (_listener->_callback) { \
121 _listener->_callback(_listener, ##_args); \
122 } \
123 } \
124 break; \
125 default: \
126 abort(); \
127 } \
128 } while (0)
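
/*
 * Usage sketch (illustrative only): the transaction commit path later in
 * this file brackets its per-AddressSpace updates with
 *
 *   MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
 *   ...
 *   MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
 *
 * The Forward/Reverse direction matters: "add"-style callbacks are
 * delivered in ascending listener priority order (Forward), while
 * "del"-style callbacks use Reverse so teardown happens in the opposite
 * order.
 */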
129
130 #define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
131 do { \
132 MemoryListener *_listener; \
133 \
134 switch (_direction) { \
135 case Forward: \
136 QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
137 if (_listener->_callback) { \
138 _listener->_callback(_listener, _section, ##_args); \
139 } \
140 } \
141 break; \
142 case Reverse: \
143 QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
144 if (_listener->_callback) { \
145 _listener->_callback(_listener, _section, ##_args); \
146 } \
147 } \
148 break; \
149 default: \
150 abort(); \
151 } \
152 } while (0)
153
154 /* No need to ref/unref .mr, the FlatRange keeps it alive. */
155 #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
156 do { \
157 MemoryRegionSection mrs = section_from_flat_range(fr, \
158 address_space_to_flatview(as)); \
159 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
160 } while (0)
161
162 struct CoalescedMemoryRange {
163 AddrRange addr;
164 QTAILQ_ENTRY(CoalescedMemoryRange) link;
165 };
166
167 struct MemoryRegionIoeventfd {
168 AddrRange addr;
169 bool match_data;
170 uint64_t data;
171 EventNotifier *e;
172 };
173
174 static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
175 MemoryRegionIoeventfd *b)
176 {
177 if (int128_lt(a->addr.start, b->addr.start)) {
178 return true;
179 } else if (int128_gt(a->addr.start, b->addr.start)) {
180 return false;
181 } else if (int128_lt(a->addr.size, b->addr.size)) {
182 return true;
183 } else if (int128_gt(a->addr.size, b->addr.size)) {
184 return false;
185 } else if (a->match_data < b->match_data) {
186 return true;
187 } else if (a->match_data > b->match_data) {
188 return false;
189 } else if (a->match_data) {
190 if (a->data < b->data) {
191 return true;
192 } else if (a->data > b->data) {
193 return false;
194 }
195 }
196 if (a->e < b->e) {
197 return true;
198 } else if (a->e > b->e) {
199 return false;
200 }
201 return false;
202 }
203
204 static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
205 MemoryRegionIoeventfd *b)
206 {
207 if (int128_eq(a->addr.start, b->addr.start) &&
208 (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
209 (int128_eq(a->addr.size, b->addr.size) &&
210 (a->match_data == b->match_data) &&
211 ((a->match_data && (a->data == b->data)) || !a->match_data) &&
212 (a->e == b->e))))
213 return true;
214
215 return false;
216 }
217
218 /* Range of memory in the global map. Addresses are absolute. */
219 struct FlatRange {
220 MemoryRegion *mr;
221 hwaddr offset_in_region;
222 AddrRange addr;
223 uint8_t dirty_log_mask;
224 bool romd_mode;
225 bool readonly;
226 bool nonvolatile;
227 };
228
229 #define FOR_EACH_FLAT_RANGE(var, view) \
230 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
231
232 static inline MemoryRegionSection
233 section_from_flat_range(FlatRange *fr, FlatView *fv)
234 {
235 return (MemoryRegionSection) {
236 .mr = fr->mr,
237 .fv = fv,
238 .offset_within_region = fr->offset_in_region,
239 .size = fr->addr.size,
240 .offset_within_address_space = int128_get64(fr->addr.start),
241 .readonly = fr->readonly,
242 .nonvolatile = fr->nonvolatile,
243 };
244 }
245
246 static bool flatrange_equal(FlatRange *a, FlatRange *b)
247 {
248 return a->mr == b->mr
249 && addrrange_equal(a->addr, b->addr)
250 && a->offset_in_region == b->offset_in_region
251 && a->romd_mode == b->romd_mode
252 && a->readonly == b->readonly
253 && a->nonvolatile == b->nonvolatile;
254 }
255
256 static FlatView *flatview_new(MemoryRegion *mr_root)
257 {
258 FlatView *view;
259
260 view = g_new0(FlatView, 1);
261 view->ref = 1;
262 view->root = mr_root;
263 memory_region_ref(mr_root);
264 trace_flatview_new(view, mr_root);
265
266 return view;
267 }
268
269 /* Insert a range into a given position. Caller is responsible for maintaining
270 * sorting order.
271 */
272 static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
273 {
274 if (view->nr == view->nr_allocated) {
275 view->nr_allocated = MAX(2 * view->nr, 10);
276 view->ranges = g_realloc(view->ranges,
277 view->nr_allocated * sizeof(*view->ranges));
278 }
279 memmove(view->ranges + pos + 1, view->ranges + pos,
280 (view->nr - pos) * sizeof(FlatRange));
281 view->ranges[pos] = *range;
282 memory_region_ref(range->mr);
283 ++view->nr;
284 }
285
286 static void flatview_destroy(FlatView *view)
287 {
288 int i;
289
290 trace_flatview_destroy(view, view->root);
291 if (view->dispatch) {
292 address_space_dispatch_free(view->dispatch);
293 }
294 for (i = 0; i < view->nr; i++) {
295 memory_region_unref(view->ranges[i].mr);
296 }
297 g_free(view->ranges);
298 memory_region_unref(view->root);
299 g_free(view);
300 }
301
302 static bool flatview_ref(FlatView *view)
303 {
304 return qatomic_fetch_inc_nonzero(&view->ref) > 0;
305 }
306
307 void flatview_unref(FlatView *view)
308 {
309 if (qatomic_fetch_dec(&view->ref) == 1) {
310 trace_flatview_destroy_rcu(view, view->root);
311 assert(view->root);
312 call_rcu(view, flatview_destroy, rcu);
313 }
314 }
315
316 static bool can_merge(FlatRange *r1, FlatRange *r2)
317 {
318 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
319 && r1->mr == r2->mr
320 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
321 r1->addr.size),
322 int128_make64(r2->offset_in_region))
323 && r1->dirty_log_mask == r2->dirty_log_mask
324 && r1->romd_mode == r2->romd_mode
325 && r1->readonly == r2->readonly
326 && r1->nonvolatile == r2->nonvolatile;
327 }
328
329 /* Attempt to simplify a view by merging adjacent ranges */
330 static void flatview_simplify(FlatView *view)
331 {
332 unsigned i, j, k;
333
334 i = 0;
335 while (i < view->nr) {
336 j = i + 1;
337 while (j < view->nr
338 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
339 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
340 ++j;
341 }
342 ++i;
343 for (k = i; k < j; k++) {
344 memory_region_unref(view->ranges[k].mr);
345 }
346 memmove(&view->ranges[i], &view->ranges[j],
347 (view->nr - j) * sizeof(view->ranges[j]));
348 view->nr -= j - i;
349 }
350 }
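
/*
 * Illustrative note (hypothetical layout): two FlatRanges merge only when
 * they are contiguous both in guest address space and in the underlying
 * region, with identical attributes.  For instance, two aliases into the
 * same RAM region, one covering offsets [0x0, 0xC00) mapped at 0x0 and one
 * covering [0xC00, 0x1000) mapped at 0xC00, render as two adjacent
 * FlatRanges with the same mr; can_merge() accepts them and the loop above
 * collapses them into a single [0x0, 0x1000) range.
 */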
351
352 static bool memory_region_big_endian(MemoryRegion *mr)
353 {
354 #if TARGET_BIG_ENDIAN
355 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
356 #else
357 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
358 #endif
359 }
360
361 static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
362 {
363 if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
364 switch (op & MO_SIZE) {
365 case MO_8:
366 break;
367 case MO_16:
368 *data = bswap16(*data);
369 break;
370 case MO_32:
371 *data = bswap32(*data);
372 break;
373 case MO_64:
374 *data = bswap64(*data);
375 break;
376 default:
377 g_assert_not_reached();
378 }
379 }
380 }
381
382 static inline void memory_region_shift_read_access(uint64_t *value,
383 signed shift,
384 uint64_t mask,
385 uint64_t tmp)
386 {
387 if (shift >= 0) {
388 *value |= (tmp & mask) << shift;
389 } else {
390 *value |= (tmp & mask) >> -shift;
391 }
392 }
393
394 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
395 signed shift,
396 uint64_t mask)
397 {
398 uint64_t tmp;
399
400 if (shift >= 0) {
401 tmp = (*value >> shift) & mask;
402 } else {
403 tmp = (*value << -shift) & mask;
404 }
405
406 return tmp;
407 }
408
409 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
410 {
411 MemoryRegion *root;
412 hwaddr abs_addr = offset;
413
414 abs_addr += mr->addr;
415 for (root = mr; root->container; ) {
416 root = root->container;
417 abs_addr += root->addr;
418 }
419
420 return abs_addr;
421 }
422
423 static int get_cpu_index(void)
424 {
425 if (current_cpu) {
426 return current_cpu->cpu_index;
427 }
428 return -1;
429 }
430
431 static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
432 hwaddr addr,
433 uint64_t *value,
434 unsigned size,
435 signed shift,
436 uint64_t mask,
437 MemTxAttrs attrs)
438 {
439 uint64_t tmp;
440
441 tmp = mr->ops->read(mr->opaque, addr, size);
442 if (mr->subpage) {
443 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
444 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
445 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
446 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
447 memory_region_name(mr));
448 }
449 memory_region_shift_read_access(value, shift, mask, tmp);
450 return MEMTX_OK;
451 }
452
453 static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
454 hwaddr addr,
455 uint64_t *value,
456 unsigned size,
457 signed shift,
458 uint64_t mask,
459 MemTxAttrs attrs)
460 {
461 uint64_t tmp = 0;
462 MemTxResult r;
463
464 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
465 if (mr->subpage) {
466 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
467 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
468 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
469 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
470 memory_region_name(mr));
471 }
472 memory_region_shift_read_access(value, shift, mask, tmp);
473 return r;
474 }
475
476 static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
477 hwaddr addr,
478 uint64_t *value,
479 unsigned size,
480 signed shift,
481 uint64_t mask,
482 MemTxAttrs attrs)
483 {
484 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
485
486 if (mr->subpage) {
487 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
488 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
489 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
490 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
491 memory_region_name(mr));
492 }
493 mr->ops->write(mr->opaque, addr, tmp, size);
494 return MEMTX_OK;
495 }
496
497 static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
498 hwaddr addr,
499 uint64_t *value,
500 unsigned size,
501 signed shift,
502 uint64_t mask,
503 MemTxAttrs attrs)
504 {
505 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
506
507 if (mr->subpage) {
508 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
509 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
510 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
511 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
512 memory_region_name(mr));
513 }
514 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
515 }
516
517 static MemTxResult access_with_adjusted_size(hwaddr addr,
518 uint64_t *value,
519 unsigned size,
520 unsigned access_size_min,
521 unsigned access_size_max,
522 MemTxResult (*access_fn)
523 (MemoryRegion *mr,
524 hwaddr addr,
525 uint64_t *value,
526 unsigned size,
527 signed shift,
528 uint64_t mask,
529 MemTxAttrs attrs),
530 MemoryRegion *mr,
531 MemTxAttrs attrs)
532 {
533 uint64_t access_mask;
534 unsigned access_size;
535 unsigned i;
536 MemTxResult r = MEMTX_OK;
537 bool reentrancy_guard_applied = false;
538
539 if (!access_size_min) {
540 access_size_min = 1;
541 }
542 if (!access_size_max) {
543 access_size_max = 4;
544 }
545
546 /* Do not allow more than one simultaneous access to a device's IO Regions */
547 if (mr->dev && !mr->disable_reentrancy_guard &&
548 !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
549 if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
550 warn_report_once("Blocked re-entrant IO on MemoryRegion: "
551 "%s at addr: 0x%" HWADDR_PRIX,
552 memory_region_name(mr), addr);
553 return MEMTX_ACCESS_ERROR;
554 }
555 mr->dev->mem_reentrancy_guard.engaged_in_io = true;
556 reentrancy_guard_applied = true;
557 }
558
559 /* FIXME: support unaligned access? */
560 access_size = MAX(MIN(size, access_size_max), access_size_min);
561 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
562 if (memory_region_big_endian(mr)) {
563 for (i = 0; i < size; i += access_size) {
564 r |= access_fn(mr, addr + i, value, access_size,
565 (size - access_size - i) * 8, access_mask, attrs);
566 }
567 } else {
568 for (i = 0; i < size; i += access_size) {
569 r |= access_fn(mr, addr + i, value, access_size, i * 8,
570 access_mask, attrs);
571 }
572 }
573 if (mr->dev && reentrancy_guard_applied) {
574 mr->dev->mem_reentrancy_guard.engaged_in_io = false;
575 }
576 return r;
577 }
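
/*
 * Worked example (illustrative, not part of the build): a 64-bit guest
 * access to a device whose MemoryRegionOps advertise
 * .impl.max_access_size = 4 is split into two 4-byte accesses.  On a
 * little-endian region the loop above issues
 *
 *   access_fn(mr, addr + 0, &value, 4, 0,  0xffffffff, attrs);
 *   access_fn(mr, addr + 4, &value, 4, 32, 0xffffffff, attrs);
 *
 * while a big-endian region uses shifts 32 and 0 instead, so the most
 * significant half of *value is read from (or written to) the lower
 * address.  The reentrancy guard around the loop rejects MMIO handlers
 * that re-enter their own device's I/O regions.
 */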
578
579 static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
580 {
581 AddressSpace *as;
582
583 while (mr->container) {
584 mr = mr->container;
585 }
586 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
587 if (mr == as->root) {
588 return as;
589 }
590 }
591 return NULL;
592 }
593
594 /* Render a memory region into the global view. Ranges in @view obscure
595 * ranges in @mr.
596 */
597 static void render_memory_region(FlatView *view,
598 MemoryRegion *mr,
599 Int128 base,
600 AddrRange clip,
601 bool readonly,
602 bool nonvolatile)
603 {
604 MemoryRegion *subregion;
605 unsigned i;
606 hwaddr offset_in_region;
607 Int128 remain;
608 Int128 now;
609 FlatRange fr;
610 AddrRange tmp;
611
612 if (!mr->enabled) {
613 return;
614 }
615
616 int128_addto(&base, int128_make64(mr->addr));
617 readonly |= mr->readonly;
618 nonvolatile |= mr->nonvolatile;
619
620 tmp = addrrange_make(base, mr->size);
621
622 if (!addrrange_intersects(tmp, clip)) {
623 return;
624 }
625
626 clip = addrrange_intersection(tmp, clip);
627
628 if (mr->alias) {
629 int128_subfrom(&base, int128_make64(mr->alias->addr));
630 int128_subfrom(&base, int128_make64(mr->alias_offset));
631 render_memory_region(view, mr->alias, base, clip,
632 readonly, nonvolatile);
633 return;
634 }
635
636 /* Render subregions in priority order. */
637 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
638 render_memory_region(view, subregion, base, clip,
639 readonly, nonvolatile);
640 }
641
642 if (!mr->terminates) {
643 return;
644 }
645
646 offset_in_region = int128_get64(int128_sub(clip.start, base));
647 base = clip.start;
648 remain = clip.size;
649
650 fr.mr = mr;
651 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
652 fr.romd_mode = mr->romd_mode;
653 fr.readonly = readonly;
654 fr.nonvolatile = nonvolatile;
655
656 /* Render the region itself into any gaps left by the current view. */
657 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
658 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
659 continue;
660 }
661 if (int128_lt(base, view->ranges[i].addr.start)) {
662 now = int128_min(remain,
663 int128_sub(view->ranges[i].addr.start, base));
664 fr.offset_in_region = offset_in_region;
665 fr.addr = addrrange_make(base, now);
666 flatview_insert(view, i, &fr);
667 ++i;
668 int128_addto(&base, now);
669 offset_in_region += int128_get64(now);
670 int128_subfrom(&remain, now);
671 }
672 now = int128_sub(int128_min(int128_add(base, remain),
673 addrrange_end(view->ranges[i].addr)),
674 base);
675 int128_addto(&base, now);
676 offset_in_region += int128_get64(now);
677 int128_subfrom(&remain, now);
678 }
679 if (int128_nz(remain)) {
680 fr.offset_in_region = offset_in_region;
681 fr.addr = addrrange_make(base, remain);
682 flatview_insert(view, i, &fr);
683 }
684 }
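
/*
 * Illustrative sketch (hypothetical layout, not from the source): given a
 * container with a 4 GiB RAM subregion at priority 0 and a 4 KiB MMIO
 * subregion at address 0xFEE00000 with priority 1, rendering visits the
 * MMIO region first (subregions are kept sorted by descending priority),
 * inserts its FlatRange, and the RAM region then only fills the gaps
 * around it, producing three ranges:
 *
 *   [0x00000000, 0xFEE00000)   RAM,  offset_in_region 0x00000000
 *   [0xFEE00000, 0xFEE01000)   MMIO, offset_in_region 0x0
 *   [0xFEE01000, 0x100000000)  RAM,  offset_in_region 0xFEE01000
 */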
685
686 void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
687 {
688 FlatRange *fr;
689
690 assert(fv);
691 assert(cb);
692
693 FOR_EACH_FLAT_RANGE(fr, fv) {
694 if (cb(fr->addr.start, fr->addr.size, fr->mr,
695 fr->offset_in_region, opaque)) {
696 break;
697 }
698 }
699 }
700
701 static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
702 {
703 while (mr->enabled) {
704 if (mr->alias) {
705 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
706 /* The alias is included in its entirety. Use it as
707 * the "real" root, so that we can share more FlatViews.
708 */
709 mr = mr->alias;
710 continue;
711 }
712 } else if (!mr->terminates) {
713 unsigned int found = 0;
714 MemoryRegion *child, *next = NULL;
715 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
716 if (child->enabled) {
717 if (++found > 1) {
718 next = NULL;
719 break;
720 }
721 if (!child->addr && int128_ge(mr->size, child->size)) {
722 /* A child is included in its entirety. If it's the only
723 * enabled one, use it in the hope of finding an alias down the
724 * way. This will also let us share FlatViews.
725 */
726 next = child;
727 }
728 }
729 }
730 if (found == 0) {
731 return NULL;
732 }
733 if (next) {
734 mr = next;
735 continue;
736 }
737 }
738
739 return mr;
740 }
741
742 return NULL;
743 }
744
745 /* Render a memory topology into a list of disjoint absolute ranges. */
746 static FlatView *generate_memory_topology(MemoryRegion *mr)
747 {
748 int i;
749 FlatView *view;
750
751 view = flatview_new(mr);
752
753 if (mr) {
754 render_memory_region(view, mr, int128_zero(),
755 addrrange_make(int128_zero(), int128_2_64()),
756 false, false);
757 }
758 flatview_simplify(view);
759
760 view->dispatch = address_space_dispatch_new(view);
761 for (i = 0; i < view->nr; i++) {
762 MemoryRegionSection mrs =
763 section_from_flat_range(&view->ranges[i], view);
764 flatview_add_to_dispatch(view, &mrs);
765 }
766 address_space_dispatch_compact(view->dispatch);
767 g_hash_table_replace(flat_views, mr, view);
768
769 return view;
770 }
771
772 static void address_space_add_del_ioeventfds(AddressSpace *as,
773 MemoryRegionIoeventfd *fds_new,
774 unsigned fds_new_nb,
775 MemoryRegionIoeventfd *fds_old,
776 unsigned fds_old_nb)
777 {
778 unsigned iold, inew;
779 MemoryRegionIoeventfd *fd;
780 MemoryRegionSection section;
781
782 /* Generate a symmetric difference of the old and new fd sets, adding
783 * and deleting as necessary.
784 */
785
786 iold = inew = 0;
787 while (iold < fds_old_nb || inew < fds_new_nb) {
788 if (iold < fds_old_nb
789 && (inew == fds_new_nb
790 || memory_region_ioeventfd_before(&fds_old[iold],
791 &fds_new[inew]))) {
792 fd = &fds_old[iold];
793 section = (MemoryRegionSection) {
794 .fv = address_space_to_flatview(as),
795 .offset_within_address_space = int128_get64(fd->addr.start),
796 .size = fd->addr.size,
797 };
798 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
799 fd->match_data, fd->data, fd->e);
800 ++iold;
801 } else if (inew < fds_new_nb
802 && (iold == fds_old_nb
803 || memory_region_ioeventfd_before(&fds_new[inew],
804 &fds_old[iold]))) {
805 fd = &fds_new[inew];
806 section = (MemoryRegionSection) {
807 .fv = address_space_to_flatview(as),
808 .offset_within_address_space = int128_get64(fd->addr.start),
809 .size = fd->addr.size,
810 };
811 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
812 fd->match_data, fd->data, fd->e);
813 ++inew;
814 } else {
815 ++iold;
816 ++inew;
817 }
818 }
819 }
820
821 FlatView *address_space_get_flatview(AddressSpace *as)
822 {
823 FlatView *view;
824
825 RCU_READ_LOCK_GUARD();
826 do {
827 view = address_space_to_flatview(as);
828 /* If somebody has replaced as->current_map concurrently,
829 * flatview_ref returns false.
830 */
831 } while (!flatview_ref(view));
832 return view;
833 }
834
835 static void address_space_update_ioeventfds(AddressSpace *as)
836 {
837 FlatView *view;
838 FlatRange *fr;
839 unsigned ioeventfd_nb = 0;
840 unsigned ioeventfd_max;
841 MemoryRegionIoeventfd *ioeventfds;
842 AddrRange tmp;
843 unsigned i;
844
845 /*
846 * It is likely that the number of ioeventfds hasn't changed much, so use
847 * the previous size as the starting value, with some headroom to avoid
848 * gratuitous reallocations.
849 */
850 ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
851 ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);
852
853 view = address_space_get_flatview(as);
854 FOR_EACH_FLAT_RANGE(fr, view) {
855 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
856 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
857 int128_sub(fr->addr.start,
858 int128_make64(fr->offset_in_region)));
859 if (addrrange_intersects(fr->addr, tmp)) {
860 ++ioeventfd_nb;
861 if (ioeventfd_nb > ioeventfd_max) {
862 ioeventfd_max = MAX(ioeventfd_max * 2, 4);
863 ioeventfds = g_realloc(ioeventfds,
864 ioeventfd_max * sizeof(*ioeventfds));
865 }
866 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
867 ioeventfds[ioeventfd_nb-1].addr = tmp;
868 }
869 }
870 }
871
872 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
873 as->ioeventfds, as->ioeventfd_nb);
874
875 g_free(as->ioeventfds);
876 as->ioeventfds = ioeventfds;
877 as->ioeventfd_nb = ioeventfd_nb;
878 flatview_unref(view);
879 }
880
881 /*
882 * Notify the memory listeners about the coalesced IO change events of
883 * range `cmr'. Only the part that intersects the specified FlatRange
884 * will be notified.
885 */
886 static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
887 CoalescedMemoryRange *cmr, bool add)
888 {
889 AddrRange tmp;
890
891 tmp = addrrange_shift(cmr->addr,
892 int128_sub(fr->addr.start,
893 int128_make64(fr->offset_in_region)));
894 if (!addrrange_intersects(tmp, fr->addr)) {
895 return;
896 }
897 tmp = addrrange_intersection(tmp, fr->addr);
898
899 if (add) {
900 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
901 int128_get64(tmp.start),
902 int128_get64(tmp.size));
903 } else {
904 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
905 int128_get64(tmp.start),
906 int128_get64(tmp.size));
907 }
908 }
909
910 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
911 {
912 CoalescedMemoryRange *cmr;
913
914 QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
915 flat_range_coalesced_io_notify(fr, as, cmr, false);
916 }
917 }
918
919 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
920 {
921 MemoryRegion *mr = fr->mr;
922 CoalescedMemoryRange *cmr;
923
924 if (QTAILQ_EMPTY(&mr->coalesced)) {
925 return;
926 }
927
928 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
929 flat_range_coalesced_io_notify(fr, as, cmr, true);
930 }
931 }
932
933 static void address_space_update_topology_pass(AddressSpace *as,
934 const FlatView *old_view,
935 const FlatView *new_view,
936 bool adding)
937 {
938 unsigned iold, inew;
939 FlatRange *frold, *frnew;
940
941 /* Generate a symmetric difference of the old and new memory maps.
942 * Kill ranges in the old map, and instantiate ranges in the new map.
943 */
944 iold = inew = 0;
945 while (iold < old_view->nr || inew < new_view->nr) {
946 if (iold < old_view->nr) {
947 frold = &old_view->ranges[iold];
948 } else {
949 frold = NULL;
950 }
951 if (inew < new_view->nr) {
952 frnew = &new_view->ranges[inew];
953 } else {
954 frnew = NULL;
955 }
956
957 if (frold
958 && (!frnew
959 || int128_lt(frold->addr.start, frnew->addr.start)
960 || (int128_eq(frold->addr.start, frnew->addr.start)
961 && !flatrange_equal(frold, frnew)))) {
962 /* In old but not in new, or in both but attributes changed. */
963
964 if (!adding) {
965 flat_range_coalesced_io_del(frold, as);
966 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
967 }
968
969 ++iold;
970 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
971 /* In both and unchanged (except logging may have changed) */
972
973 if (adding) {
974 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
975 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
976 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
977 frold->dirty_log_mask,
978 frnew->dirty_log_mask);
979 }
980 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
981 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
982 frold->dirty_log_mask,
983 frnew->dirty_log_mask);
984 }
985 }
986
987 ++iold;
988 ++inew;
989 } else {
990 /* In new */
991
992 if (adding) {
993 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
994 flat_range_coalesced_io_add(frnew, as);
995 }
996
997 ++inew;
998 }
999 }
1000 }
1001
1002 static void flatviews_init(void)
1003 {
1004 static FlatView *empty_view;
1005
1006 if (flat_views) {
1007 return;
1008 }
1009
1010 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
1011 (GDestroyNotify) flatview_unref);
1012 if (!empty_view) {
1013 empty_view = generate_memory_topology(NULL);
1014 /* We keep it alive forever in the global variable. */
1015 flatview_ref(empty_view);
1016 } else {
1017 g_hash_table_replace(flat_views, NULL, empty_view);
1018 flatview_ref(empty_view);
1019 }
1020 }
1021
1022 static void flatviews_reset(void)
1023 {
1024 AddressSpace *as;
1025
1026 if (flat_views) {
1027 g_hash_table_unref(flat_views);
1028 flat_views = NULL;
1029 }
1030 flatviews_init();
1031
1032 /* Render unique FVs */
1033 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1034 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1035
1036 if (g_hash_table_lookup(flat_views, physmr)) {
1037 continue;
1038 }
1039
1040 generate_memory_topology(physmr);
1041 }
1042 }
1043
1044 static void address_space_set_flatview(AddressSpace *as)
1045 {
1046 FlatView *old_view = address_space_to_flatview(as);
1047 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1048 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1049
1050 assert(new_view);
1051
1052 if (old_view == new_view) {
1053 return;
1054 }
1055
1056 if (old_view) {
1057 flatview_ref(old_view);
1058 }
1059
1060 flatview_ref(new_view);
1061
1062 if (!QTAILQ_EMPTY(&as->listeners)) {
1063 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1064
1065 if (!old_view2) {
1066 old_view2 = &tmpview;
1067 }
1068 address_space_update_topology_pass(as, old_view2, new_view, false);
1069 address_space_update_topology_pass(as, old_view2, new_view, true);
1070 }
1071
1072 /* Writes are protected by the BQL. */
1073 qatomic_rcu_set(&as->current_map, new_view);
1074 if (old_view) {
1075 flatview_unref(old_view);
1076 }
1077
1078 /* Note that all the old MemoryRegions are still alive up to this
1079 * point. This relieves most MemoryListeners from the need to
1080 * ref/unref the MemoryRegions they get---unless they use them
1081 * outside the iothread mutex, in which case precise reference
1082 * counting is necessary.
1083 */
1084 if (old_view) {
1085 flatview_unref(old_view);
1086 }
1087 }
1088
1089 static void address_space_update_topology(AddressSpace *as)
1090 {
1091 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1092
1093 flatviews_init();
1094 if (!g_hash_table_lookup(flat_views, physmr)) {
1095 generate_memory_topology(physmr);
1096 }
1097 address_space_set_flatview(as);
1098 }
1099
1100 void memory_region_transaction_begin(void)
1101 {
1102 qemu_flush_coalesced_mmio_buffer();
1103 ++memory_region_transaction_depth;
1104 }
1105
1106 void memory_region_transaction_commit(void)
1107 {
1108 AddressSpace *as;
1109
1110 assert(memory_region_transaction_depth);
1111 assert(qemu_mutex_iothread_locked());
1112
1113 --memory_region_transaction_depth;
1114 if (!memory_region_transaction_depth) {
1115 if (memory_region_update_pending) {
1116 flatviews_reset();
1117
1118 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1119
1120 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1121 address_space_set_flatview(as);
1122 address_space_update_ioeventfds(as);
1123 }
1124 memory_region_update_pending = false;
1125 ioeventfd_update_pending = false;
1126 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1127 } else if (ioeventfd_update_pending) {
1128 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1129 address_space_update_ioeventfds(as);
1130 }
1131 ioeventfd_update_pending = false;
1132 }
1133 }
1134 }
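
/*
 * Usage sketch (illustrative only; bar_mr and new_base are placeholders):
 * callers batch several topology changes so that flat views, listeners and
 * ioeventfds are updated once, when the outermost transaction commits:
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(bar_mr, false);
 *   memory_region_set_address(bar_mr, new_base);
 *   memory_region_set_enabled(bar_mr, true);
 *   memory_region_transaction_commit();
 *
 * Transactions nest; only the commit that brings
 * memory_region_transaction_depth back to zero triggers the update, and it
 * must run with the BQL held (see the assertion above).
 */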
1135
1136 static void memory_region_destructor_none(MemoryRegion *mr)
1137 {
1138 }
1139
1140 static void memory_region_destructor_ram(MemoryRegion *mr)
1141 {
1142 qemu_ram_free(mr->ram_block);
1143 }
1144
1145 static bool memory_region_need_escape(char c)
1146 {
1147 return c == '/' || c == '[' || c == '\\' || c == ']';
1148 }
1149
1150 static char *memory_region_escape_name(const char *name)
1151 {
1152 const char *p;
1153 char *escaped, *q;
1154 uint8_t c;
1155 size_t bytes = 0;
1156
1157 for (p = name; *p; p++) {
1158 bytes += memory_region_need_escape(*p) ? 4 : 1;
1159 }
1160 if (bytes == p - name) {
1161 return g_memdup(name, bytes + 1);
1162 }
1163
1164 escaped = g_malloc(bytes + 1);
1165 for (p = name, q = escaped; *p; p++) {
1166 c = *p;
1167 if (unlikely(memory_region_need_escape(c))) {
1168 *q++ = '\\';
1169 *q++ = 'x';
1170 *q++ = "0123456789abcdef"[c >> 4];
1171 c = "0123456789abcdef"[c & 15];
1172 }
1173 *q++ = c;
1174 }
1175 *q = 0;
1176 return escaped;
1177 }
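
/*
 * Example (illustrative): characters that are meaningful in QOM paths are
 * rewritten as "\xNN", so a region named "pci/bar[0]" gets the canonical
 * path component "pci\x2fbar\x5b0\x5d".  Names that need no escaping are
 * returned as a straight copy.
 */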
1178
1179 static void memory_region_do_init(MemoryRegion *mr,
1180 Object *owner,
1181 const char *name,
1182 uint64_t size)
1183 {
1184 mr->size = int128_make64(size);
1185 if (size == UINT64_MAX) {
1186 mr->size = int128_2_64();
1187 }
1188 mr->name = g_strdup(name);
1189 mr->owner = owner;
1190 mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
1191 mr->ram_block = NULL;
1192
1193 if (name) {
1194 char *escaped_name = memory_region_escape_name(name);
1195 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1196
1197 if (!owner) {
1198 owner = container_get(qdev_get_machine(), "/unattached");
1199 }
1200
1201 object_property_add_child(owner, name_array, OBJECT(mr));
1202 object_unref(OBJECT(mr));
1203 g_free(name_array);
1204 g_free(escaped_name);
1205 }
1206 }
1207
1208 void memory_region_init(MemoryRegion *mr,
1209 Object *owner,
1210 const char *name,
1211 uint64_t size)
1212 {
1213 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1214 memory_region_do_init(mr, owner, name, size);
1215 }
1216
1217 static void memory_region_get_container(Object *obj, Visitor *v,
1218 const char *name, void *opaque,
1219 Error **errp)
1220 {
1221 MemoryRegion *mr = MEMORY_REGION(obj);
1222 char *path = (char *)"";
1223
1224 if (mr->container) {
1225 path = object_get_canonical_path(OBJECT(mr->container));
1226 }
1227 visit_type_str(v, name, &path, errp);
1228 if (mr->container) {
1229 g_free(path);
1230 }
1231 }
1232
1233 static Object *memory_region_resolve_container(Object *obj, void *opaque,
1234 const char *part)
1235 {
1236 MemoryRegion *mr = MEMORY_REGION(obj);
1237
1238 return OBJECT(mr->container);
1239 }
1240
1241 static void memory_region_get_priority(Object *obj, Visitor *v,
1242 const char *name, void *opaque,
1243 Error **errp)
1244 {
1245 MemoryRegion *mr = MEMORY_REGION(obj);
1246 int32_t value = mr->priority;
1247
1248 visit_type_int32(v, name, &value, errp);
1249 }
1250
1251 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1252 void *opaque, Error **errp)
1253 {
1254 MemoryRegion *mr = MEMORY_REGION(obj);
1255 uint64_t value = memory_region_size(mr);
1256
1257 visit_type_uint64(v, name, &value, errp);
1258 }
1259
1260 static void memory_region_initfn(Object *obj)
1261 {
1262 MemoryRegion *mr = MEMORY_REGION(obj);
1263 ObjectProperty *op;
1264
1265 mr->ops = &unassigned_mem_ops;
1266 mr->enabled = true;
1267 mr->romd_mode = true;
1268 mr->destructor = memory_region_destructor_none;
1269 QTAILQ_INIT(&mr->subregions);
1270 QTAILQ_INIT(&mr->coalesced);
1271
1272 op = object_property_add(OBJECT(mr), "container",
1273 "link<" TYPE_MEMORY_REGION ">",
1274 memory_region_get_container,
1275 NULL, /* memory_region_set_container */
1276 NULL, NULL);
1277 op->resolve = memory_region_resolve_container;
1278
1279 object_property_add_uint64_ptr(OBJECT(mr), "addr",
1280 &mr->addr, OBJ_PROP_FLAG_READ);
1281 object_property_add(OBJECT(mr), "priority", "uint32",
1282 memory_region_get_priority,
1283 NULL, /* memory_region_set_priority */
1284 NULL, NULL);
1285 object_property_add(OBJECT(mr), "size", "uint64",
1286 memory_region_get_size,
1287 NULL, /* memory_region_set_size, */
1288 NULL, NULL);
1289 }
1290
1291 static void iommu_memory_region_initfn(Object *obj)
1292 {
1293 MemoryRegion *mr = MEMORY_REGION(obj);
1294
1295 mr->is_iommu = true;
1296 }
1297
1298 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1299 unsigned size)
1300 {
1301 #ifdef DEBUG_UNASSIGNED
1302 printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
1303 #endif
1304 return 0;
1305 }
1306
1307 static void unassigned_mem_write(void *opaque, hwaddr addr,
1308 uint64_t val, unsigned size)
1309 {
1310 #ifdef DEBUG_UNASSIGNED
1311 printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1312 #endif
1313 }
1314
1315 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1316 unsigned size, bool is_write,
1317 MemTxAttrs attrs)
1318 {
1319 return false;
1320 }
1321
1322 const MemoryRegionOps unassigned_mem_ops = {
1323 .valid.accepts = unassigned_mem_accepts,
1324 .endianness = DEVICE_NATIVE_ENDIAN,
1325 };
1326
1327 static uint64_t memory_region_ram_device_read(void *opaque,
1328 hwaddr addr, unsigned size)
1329 {
1330 MemoryRegion *mr = opaque;
1331 uint64_t data = (uint64_t)~0;
1332
1333 switch (size) {
1334 case 1:
1335 data = *(uint8_t *)(mr->ram_block->host + addr);
1336 break;
1337 case 2:
1338 data = *(uint16_t *)(mr->ram_block->host + addr);
1339 break;
1340 case 4:
1341 data = *(uint32_t *)(mr->ram_block->host + addr);
1342 break;
1343 case 8:
1344 data = *(uint64_t *)(mr->ram_block->host + addr);
1345 break;
1346 }
1347
1348 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1349
1350 return data;
1351 }
1352
1353 static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1354 uint64_t data, unsigned size)
1355 {
1356 MemoryRegion *mr = opaque;
1357
1358 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1359
1360 switch (size) {
1361 case 1:
1362 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1363 break;
1364 case 2:
1365 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1366 break;
1367 case 4:
1368 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1369 break;
1370 case 8:
1371 *(uint64_t *)(mr->ram_block->host + addr) = data;
1372 break;
1373 }
1374 }
1375
1376 static const MemoryRegionOps ram_device_mem_ops = {
1377 .read = memory_region_ram_device_read,
1378 .write = memory_region_ram_device_write,
1379 .endianness = DEVICE_HOST_ENDIAN,
1380 .valid = {
1381 .min_access_size = 1,
1382 .max_access_size = 8,
1383 .unaligned = true,
1384 },
1385 .impl = {
1386 .min_access_size = 1,
1387 .max_access_size = 8,
1388 .unaligned = true,
1389 },
1390 };
1391
1392 bool memory_region_access_valid(MemoryRegion *mr,
1393 hwaddr addr,
1394 unsigned size,
1395 bool is_write,
1396 MemTxAttrs attrs)
1397 {
1398 if (mr->ops->valid.accepts
1399 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
1400 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1401 ", size %u, region '%s', reason: rejected\n",
1402 is_write ? "write" : "read",
1403 addr, size, memory_region_name(mr));
1404 return false;
1405 }
1406
1407 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1408 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1409 ", size %u, region '%s', reason: unaligned\n",
1410 is_write ? "write" : "read",
1411 addr, size, memory_region_name(mr));
1412 return false;
1413 }
1414
1415 /* Treat a max_access_size of zero as "all sizes valid", for compatibility */
1416 if (!mr->ops->valid.max_access_size) {
1417 return true;
1418 }
1419
1420 if (size > mr->ops->valid.max_access_size
1421 || size < mr->ops->valid.min_access_size) {
1422 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1423 ", size %u, region '%s', reason: invalid size "
1424 "(min:%u max:%u)\n",
1425 is_write ? "write" : "read",
1426 addr, size, memory_region_name(mr),
1427 mr->ops->valid.min_access_size,
1428 mr->ops->valid.max_access_size);
1429 return false;
1430 }
1431 return true;
1432 }
1433
1434 static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1435 hwaddr addr,
1436 uint64_t *pval,
1437 unsigned size,
1438 MemTxAttrs attrs)
1439 {
1440 *pval = 0;
1441
1442 if (mr->ops->read) {
1443 return access_with_adjusted_size(addr, pval, size,
1444 mr->ops->impl.min_access_size,
1445 mr->ops->impl.max_access_size,
1446 memory_region_read_accessor,
1447 mr, attrs);
1448 } else {
1449 return access_with_adjusted_size(addr, pval, size,
1450 mr->ops->impl.min_access_size,
1451 mr->ops->impl.max_access_size,
1452 memory_region_read_with_attrs_accessor,
1453 mr, attrs);
1454 }
1455 }
1456
1457 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1458 hwaddr addr,
1459 uint64_t *pval,
1460 MemOp op,
1461 MemTxAttrs attrs)
1462 {
1463 unsigned size = memop_size(op);
1464 MemTxResult r;
1465
1466 if (mr->alias) {
1467 return memory_region_dispatch_read(mr->alias,
1468 mr->alias_offset + addr,
1469 pval, op, attrs);
1470 }
1471 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1472 *pval = unassigned_mem_read(mr, addr, size);
1473 return MEMTX_DECODE_ERROR;
1474 }
1475
1476 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1477 adjust_endianness(mr, pval, op);
1478 return r;
1479 }
1480
1481 /* Return true if an eventfd was signalled */
1482 static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1483 hwaddr addr,
1484 uint64_t data,
1485 unsigned size,
1486 MemTxAttrs attrs)
1487 {
1488 MemoryRegionIoeventfd ioeventfd = {
1489 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1490 .data = data,
1491 };
1492 unsigned i;
1493
1494 for (i = 0; i < mr->ioeventfd_nb; i++) {
1495 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1496 ioeventfd.e = mr->ioeventfds[i].e;
1497
1498 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1499 event_notifier_set(ioeventfd.e);
1500 return true;
1501 }
1502 }
1503
1504 return false;
1505 }
1506
1507 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1508 hwaddr addr,
1509 uint64_t data,
1510 MemOp op,
1511 MemTxAttrs attrs)
1512 {
1513 unsigned size = memop_size(op);
1514
1515 if (mr->alias) {
1516 return memory_region_dispatch_write(mr->alias,
1517 mr->alias_offset + addr,
1518 data, op, attrs);
1519 }
1520 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1521 unassigned_mem_write(mr, addr, data, size);
1522 return MEMTX_DECODE_ERROR;
1523 }
1524
1525 adjust_endianness(mr, &data, op);
1526
1527 if ((!kvm_eventfds_enabled()) &&
1528 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1529 return MEMTX_OK;
1530 }
1531
1532 if (mr->ops->write) {
1533 return access_with_adjusted_size(addr, &data, size,
1534 mr->ops->impl.min_access_size,
1535 mr->ops->impl.max_access_size,
1536 memory_region_write_accessor, mr,
1537 attrs);
1538 } else {
1539 return
1540 access_with_adjusted_size(addr, &data, size,
1541 mr->ops->impl.min_access_size,
1542 mr->ops->impl.max_access_size,
1543 memory_region_write_with_attrs_accessor,
1544 mr, attrs);
1545 }
1546 }
1547
1548 void memory_region_init_io(MemoryRegion *mr,
1549 Object *owner,
1550 const MemoryRegionOps *ops,
1551 void *opaque,
1552 const char *name,
1553 uint64_t size)
1554 {
1555 memory_region_init(mr, owner, name, size);
1556 mr->ops = ops ? ops : &unassigned_mem_ops;
1557 mr->opaque = opaque;
1558 mr->terminates = true;
1559 }
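
/*
 * Usage sketch (illustrative; MyDevState, the callbacks and the register
 * layout below are hypothetical, not part of QEMU):
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t val,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = val;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_LITTLE_ENDIAN,
 *       .impl.min_access_size = 4,
 *       .impl.max_access_size = 4,
 *   };
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-regs", 0x1000);
 *
 * Accesses narrower or wider than .impl allows are adjusted by
 * access_with_adjusted_size() before the callbacks run.
 */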
1560
1561 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1562 Object *owner,
1563 const char *name,
1564 uint64_t size,
1565 Error **errp)
1566 {
1567 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
1568 }
1569
1570 void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1571 Object *owner,
1572 const char *name,
1573 uint64_t size,
1574 uint32_t ram_flags,
1575 Error **errp)
1576 {
1577 Error *err = NULL;
1578 memory_region_init(mr, owner, name, size);
1579 mr->ram = true;
1580 mr->terminates = true;
1581 mr->destructor = memory_region_destructor_ram;
1582 mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
1583 if (err) {
1584 mr->size = int128_zero();
1585 object_unparent(OBJECT(mr));
1586 error_propagate(errp, err);
1587 }
1588 }
1589
1590 void memory_region_init_resizeable_ram(MemoryRegion *mr,
1591 Object *owner,
1592 const char *name,
1593 uint64_t size,
1594 uint64_t max_size,
1595 void (*resized)(const char*,
1596 uint64_t length,
1597 void *host),
1598 Error **errp)
1599 {
1600 Error *err = NULL;
1601 memory_region_init(mr, owner, name, size);
1602 mr->ram = true;
1603 mr->terminates = true;
1604 mr->destructor = memory_region_destructor_ram;
1605 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1606 mr, &err);
1607 if (err) {
1608 mr->size = int128_zero();
1609 object_unparent(OBJECT(mr));
1610 error_propagate(errp, err);
1611 }
1612 }
1613
1614 #ifdef CONFIG_POSIX
1615 void memory_region_init_ram_from_file(MemoryRegion *mr,
1616 Object *owner,
1617 const char *name,
1618 uint64_t size,
1619 uint64_t align,
1620 uint32_t ram_flags,
1621 const char *path,
1622 bool readonly,
1623 Error **errp)
1624 {
1625 Error *err = NULL;
1626 memory_region_init(mr, owner, name, size);
1627 mr->ram = true;
1628 mr->readonly = readonly;
1629 mr->terminates = true;
1630 mr->destructor = memory_region_destructor_ram;
1631 mr->align = align;
1632 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
1633 readonly, &err);
1634 if (err) {
1635 mr->size = int128_zero();
1636 object_unparent(OBJECT(mr));
1637 error_propagate(errp, err);
1638 }
1639 }
1640
1641 void memory_region_init_ram_from_fd(MemoryRegion *mr,
1642 Object *owner,
1643 const char *name,
1644 uint64_t size,
1645 uint32_t ram_flags,
1646 int fd,
1647 ram_addr_t offset,
1648 Error **errp)
1649 {
1650 Error *err = NULL;
1651 memory_region_init(mr, owner, name, size);
1652 mr->ram = true;
1653 mr->terminates = true;
1654 mr->destructor = memory_region_destructor_ram;
1655 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
1656 false, &err);
1657 if (err) {
1658 mr->size = int128_zero();
1659 object_unparent(OBJECT(mr));
1660 error_propagate(errp, err);
1661 }
1662 }
1663 #endif
1664
1665 void memory_region_init_ram_ptr(MemoryRegion *mr,
1666 Object *owner,
1667 const char *name,
1668 uint64_t size,
1669 void *ptr)
1670 {
1671 memory_region_init(mr, owner, name, size);
1672 mr->ram = true;
1673 mr->terminates = true;
1674 mr->destructor = memory_region_destructor_ram;
1675
1676 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1677 assert(ptr != NULL);
1678 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1679 }
1680
1681 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1682 Object *owner,
1683 const char *name,
1684 uint64_t size,
1685 void *ptr)
1686 {
1687 memory_region_init(mr, owner, name, size);
1688 mr->ram = true;
1689 mr->terminates = true;
1690 mr->ram_device = true;
1691 mr->ops = &ram_device_mem_ops;
1692 mr->opaque = mr;
1693 mr->destructor = memory_region_destructor_ram;
1694
1695 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1696 assert(ptr != NULL);
1697 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1698 }
1699
1700 void memory_region_init_alias(MemoryRegion *mr,
1701 Object *owner,
1702 const char *name,
1703 MemoryRegion *orig,
1704 hwaddr offset,
1705 uint64_t size)
1706 {
1707 memory_region_init(mr, owner, name, size);
1708 mr->alias = orig;
1709 mr->alias_offset = offset;
1710 }
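
/*
 * Illustrative sketch (hypothetical names): an alias exposes a window of an
 * existing region at another guest address without copying it, e.g. mapping
 * a second view of the low 1 MiB of RAM at a different address:
 *
 *   memory_region_init_alias(&lomem_alias, OBJECT(machine), "lomem-alias",
 *                            system_ram_mr, 0, 0x100000);
 *   memory_region_add_subregion(system_memory_mr, 0xE0000000, &lomem_alias);
 *
 * The alias itself never terminates accesses; render_memory_region()
 * follows mr->alias and mr->alias_offset to the original region when the
 * flat view is built.
 */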
1711
1712 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1713 Object *owner,
1714 const char *name,
1715 uint64_t size,
1716 Error **errp)
1717 {
1718 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
1719 mr->readonly = true;
1720 }
1721
1722 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1723 Object *owner,
1724 const MemoryRegionOps *ops,
1725 void *opaque,
1726 const char *name,
1727 uint64_t size,
1728 Error **errp)
1729 {
1730 Error *err = NULL;
1731 assert(ops);
1732 memory_region_init(mr, owner, name, size);
1733 mr->ops = ops;
1734 mr->opaque = opaque;
1735 mr->terminates = true;
1736 mr->rom_device = true;
1737 mr->destructor = memory_region_destructor_ram;
1738 mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
1739 if (err) {
1740 mr->size = int128_zero();
1741 object_unparent(OBJECT(mr));
1742 error_propagate(errp, err);
1743 }
1744 }
1745
1746 void memory_region_init_iommu(void *_iommu_mr,
1747 size_t instance_size,
1748 const char *mrtypename,
1749 Object *owner,
1750 const char *name,
1751 uint64_t size)
1752 {
1753 struct IOMMUMemoryRegion *iommu_mr;
1754 struct MemoryRegion *mr;
1755
1756 object_initialize(_iommu_mr, instance_size, mrtypename);
1757 mr = MEMORY_REGION(_iommu_mr);
1758 memory_region_do_init(mr, owner, name, size);
1759 iommu_mr = IOMMU_MEMORY_REGION(mr);
1760 mr->terminates = true; /* then re-forwards */
1761 QLIST_INIT(&iommu_mr->iommu_notify);
1762 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1763 }
1764
1765 static void memory_region_finalize(Object *obj)
1766 {
1767 MemoryRegion *mr = MEMORY_REGION(obj);
1768
1769 assert(!mr->container);
1770
1771 /* We know the region is not visible in any address space (it
1772 * does not have a container and cannot be a root either because
1773 * it has no references), so we can blindly clear mr->enabled.
1774 * memory_region_set_enabled instead could trigger a transaction
1775 * and cause an infinite loop.
1776 */
1777 mr->enabled = false;
1778 memory_region_transaction_begin();
1779 while (!QTAILQ_EMPTY(&mr->subregions)) {
1780 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1781 memory_region_del_subregion(mr, subregion);
1782 }
1783 memory_region_transaction_commit();
1784
1785 mr->destructor(mr);
1786 memory_region_clear_coalescing(mr);
1787 g_free((char *)mr->name);
1788 g_free(mr->ioeventfds);
1789 }
1790
1791 Object *memory_region_owner(MemoryRegion *mr)
1792 {
1793 Object *obj = OBJECT(mr);
1794 return obj->parent;
1795 }
1796
1797 void memory_region_ref(MemoryRegion *mr)
1798 {
1799 /* MMIO callbacks most likely will access data that belongs
1800 * to the owner, hence the need to ref/unref the owner whenever
1801 * the memory region is in use.
1802 *
1803 * The memory region is a child of its owner. As long as the
1804 * owner doesn't call unparent itself on the memory region,
1805 * ref-ing the owner will also keep the memory region alive.
1806 * Memory regions without an owner are supposed to never go away;
1807 * we do not ref/unref them because it would slow down DMA noticeably.
1808 */
1809 if (mr && mr->owner) {
1810 object_ref(mr->owner);
1811 }
1812 }
1813
1814 void memory_region_unref(MemoryRegion *mr)
1815 {
1816 if (mr && mr->owner) {
1817 object_unref(mr->owner);
1818 }
1819 }
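
/*
 * Illustrative note: listeners or DMA code that keep a MemoryRegion pointer
 * across a point where the BQL may be dropped are expected to hold a
 * reference for the duration:
 *
 *   memory_region_ref(mr);
 *   ... use the region outside the BQL ...
 *   memory_region_unref(mr);
 *
 * As the comment above explains, this actually pins the owner object, so
 * regions without an owner are assumed to live for the whole lifetime of
 * the VM.
 */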
1820
1821 uint64_t memory_region_size(MemoryRegion *mr)
1822 {
1823 if (int128_eq(mr->size, int128_2_64())) {
1824 return UINT64_MAX;
1825 }
1826 return int128_get64(mr->size);
1827 }
1828
1829 const char *memory_region_name(const MemoryRegion *mr)
1830 {
1831 if (!mr->name) {
1832 ((MemoryRegion *)mr)->name =
1833 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
1834 }
1835 return mr->name;
1836 }
1837
1838 bool memory_region_is_ram_device(MemoryRegion *mr)
1839 {
1840 return mr->ram_device;
1841 }
1842
1843 bool memory_region_is_protected(MemoryRegion *mr)
1844 {
1845 return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
1846 }
1847
1848 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1849 {
1850 uint8_t mask = mr->dirty_log_mask;
1851 RAMBlock *rb = mr->ram_block;
1852
1853 if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
1854 memory_region_is_iommu(mr))) {
1855 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1856 }
1857
1858 if (tcg_enabled() && rb) {
1859 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
1860 mask |= (1 << DIRTY_MEMORY_CODE);
1861 }
1862 return mask;
1863 }
1864
1865 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1866 {
1867 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1868 }
1869
1870 static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
1871 Error **errp)
1872 {
1873 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1874 IOMMUNotifier *iommu_notifier;
1875 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1876 int ret = 0;
1877
1878 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1879 flags |= iommu_notifier->notifier_flags;
1880 }
1881
1882 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1883 ret = imrc->notify_flag_changed(iommu_mr,
1884 iommu_mr->iommu_notify_flags,
1885 flags, errp);
1886 }
1887
1888 if (!ret) {
1889 iommu_mr->iommu_notify_flags = flags;
1890 }
1891 return ret;
1892 }
1893
1894 int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1895 uint64_t page_size_mask,
1896 Error **errp)
1897 {
1898 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1899 int ret = 0;
1900
1901 if (imrc->iommu_set_page_size_mask) {
1902 ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
1903 }
1904 return ret;
1905 }
1906
1907 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1908 IOMMUNotifier *n, Error **errp)
1909 {
1910 IOMMUMemoryRegion *iommu_mr;
1911 int ret;
1912
1913 if (mr->alias) {
1914 return memory_region_register_iommu_notifier(mr->alias, n, errp);
1915 }
1916
1917 /* We need to register for at least one bitfield */
1918 iommu_mr = IOMMU_MEMORY_REGION(mr);
1919 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1920 assert(n->start <= n->end);
1921 assert(n->iommu_idx >= 0 &&
1922 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1923
1924 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1925 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
1926 if (ret) {
1927 QLIST_REMOVE(n, node);
1928 }
1929 return ret;
1930 }
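
/*
 * Usage sketch (illustrative; my_notify, my_iommu_mr and the range are
 * placeholders).  Callers typically initialize the notifier with
 * iommu_notifier_init() and then register it against the IOMMU region:
 *
 *   IOMMUNotifier n;
 *
 *   iommu_notifier_init(&n, my_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
 *                       0, HWADDR_MAX, 0);
 *   if (memory_region_register_iommu_notifier(my_iommu_mr, &n, errp)) {
 *       ... registration failed, errp is set ...
 *   }
 *
 * Registration may fail if the IOMMU implementation rejects the combined
 * notifier flags (see memory_region_update_iommu_notify_flags() above).
 */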
1931
1932 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
1933 {
1934 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1935
1936 if (imrc->get_min_page_size) {
1937 return imrc->get_min_page_size(iommu_mr);
1938 }
1939 return TARGET_PAGE_SIZE;
1940 }
1941
1942 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
1943 {
1944 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1945 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1946 hwaddr addr, granularity;
1947 IOMMUTLBEntry iotlb;
1948
1949 /* If the IOMMU has its own replay callback, override */
1950 if (imrc->replay) {
1951 imrc->replay(iommu_mr, n);
1952 return;
1953 }
1954
1955 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
1956
1957 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1958 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
1959 if (iotlb.perm != IOMMU_NONE) {
1960 n->notify(n, &iotlb);
1961 }
1962
1963 /* if (2^64 - MR size) < granularity, it's possible to get an
1964 * infinite loop here. This should catch such a wraparound */
1965 if ((addr + granularity) < addr) {
1966 break;
1967 }
1968 }
1969 }
1970
1971 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1972 IOMMUNotifier *n)
1973 {
1974 IOMMUMemoryRegion *iommu_mr;
1975
1976 if (mr->alias) {
1977 memory_region_unregister_iommu_notifier(mr->alias, n);
1978 return;
1979 }
1980 QLIST_REMOVE(n, node);
1981 iommu_mr = IOMMU_MEMORY_REGION(mr);
1982 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
1983 }
1984
1985 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1986 IOMMUTLBEvent *event)
1987 {
1988 IOMMUTLBEntry *entry = &event->entry;
1989 hwaddr entry_end = entry->iova + entry->addr_mask;
1990 IOMMUTLBEntry tmp = *entry;
1991
1992 if (event->type == IOMMU_NOTIFIER_UNMAP) {
1993 assert(entry->perm == IOMMU_NONE);
1994 }
1995
1996 /*
1997 * Skip the notification if it does not overlap with the
1998 * registered range.
1999 */
2000 if (notifier->start > entry_end || notifier->end < entry->iova) {
2001 return;
2002 }
2003
2004 if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
2005 /* Crop (iova, addr_mask) to range */
2006 tmp.iova = MAX(tmp.iova, notifier->start);
2007 tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
2008 } else {
2009 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
2010 }
2011
2012 if (event->type & notifier->notifier_flags) {
2013 notifier->notify(notifier, &tmp);
2014 }
2015 }
2016
2017 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
2018 {
2019 IOMMUTLBEvent event;
2020
2021 event.type = IOMMU_NOTIFIER_UNMAP;
2022 event.entry.target_as = &address_space_memory;
2023 event.entry.iova = notifier->start;
2024 event.entry.perm = IOMMU_NONE;
2025 event.entry.addr_mask = notifier->end - notifier->start;
2026
2027 memory_region_notify_iommu_one(notifier, &event);
2028 }
2029
2030 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2031 int iommu_idx,
2032 IOMMUTLBEvent event)
2033 {
2034 IOMMUNotifier *iommu_notifier;
2035
2036 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2037
2038 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2039 if (iommu_notifier->iommu_idx == iommu_idx) {
2040 memory_region_notify_iommu_one(iommu_notifier, &event);
2041 }
2042 }
2043 }
2044
2045 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
2046 enum IOMMUMemoryRegionAttr attr,
2047 void *data)
2048 {
2049 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2050
2051 if (!imrc->get_attr) {
2052 return -EINVAL;
2053 }
2054
2055 return imrc->get_attr(iommu_mr, attr, data);
2056 }
2057
2058 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2059 MemTxAttrs attrs)
2060 {
2061 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2062
2063 if (!imrc->attrs_to_index) {
2064 return 0;
2065 }
2066
2067 return imrc->attrs_to_index(iommu_mr, attrs);
2068 }
2069
2070 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2071 {
2072 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2073
2074 if (!imrc->num_indexes) {
2075 return 1;
2076 }
2077
2078 return imrc->num_indexes(iommu_mr);
2079 }
2080
2081 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
2082 {
2083 if (!memory_region_is_mapped(mr) || !memory_region_is_ram(mr)) {
2084 return NULL;
2085 }
2086 return mr->rdm;
2087 }
2088
2089 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2090 RamDiscardManager *rdm)
2091 {
2092 g_assert(memory_region_is_ram(mr) && !memory_region_is_mapped(mr));
2093 g_assert(!rdm || !mr->rdm);
2094 mr->rdm = rdm;
2095 }
2096
2097 uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
2098 const MemoryRegion *mr)
2099 {
2100 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2101
2102 g_assert(rdmc->get_min_granularity);
2103 return rdmc->get_min_granularity(rdm, mr);
2104 }
2105
2106 bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
2107 const MemoryRegionSection *section)
2108 {
2109 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2110
2111 g_assert(rdmc->is_populated);
2112 return rdmc->is_populated(rdm, section);
2113 }
2114
2115 int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
2116 MemoryRegionSection *section,
2117 ReplayRamPopulate replay_fn,
2118 void *opaque)
2119 {
2120 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2121
2122 g_assert(rdmc->replay_populated);
2123 return rdmc->replay_populated(rdm, section, replay_fn, opaque);
2124 }
2125
2126 void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
2127 MemoryRegionSection *section,
2128 ReplayRamDiscard replay_fn,
2129 void *opaque)
2130 {
2131 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2132
2133 g_assert(rdmc->replay_discarded);
2134 rdmc->replay_discarded(rdm, section, replay_fn, opaque);
2135 }
2136
2137 void ram_discard_manager_register_listener(RamDiscardManager *rdm,
2138 RamDiscardListener *rdl,
2139 MemoryRegionSection *section)
2140 {
2141 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2142
2143 g_assert(rdmc->register_listener);
2144 rdmc->register_listener(rdm, rdl, section);
2145 }
2146
2147 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
2148 RamDiscardListener *rdl)
2149 {
2150 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2151
2152 g_assert(rdmc->unregister_listener);
2153 rdmc->unregister_listener(rdm, rdl);
2154 }
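/*
 * Illustrative sketch of the listener side: a consumer that pins guest
 * memory (vfio-like) registers a RamDiscardListener so that it only maps
 * populated parts and reacts to later discards. The rdl pointer and the
 * callback names are hypothetical.
 *
 *     static int my_notify_populate(RamDiscardListener *rdl,
 *                                   MemoryRegionSection *section)
 *     {
 *         // map/pin the now-populated range; return 0 on success
 *         return 0;
 *     }
 *
 *     static void my_notify_discard(RamDiscardListener *rdl,
 *                                   MemoryRegionSection *section)
 *     {
 *         // unmap/unpin the discarded range
 *     }
 *
 *     RamDiscardManager *rdm =
 *         memory_region_get_ram_discard_manager(section->mr);
 *     ram_discard_listener_init(rdl, my_notify_populate, my_notify_discard,
 *                               false);
 *     ram_discard_manager_register_listener(rdm, rdl, section);
 */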
2155
2156 /* Called with rcu_read_lock held. */
2157 bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
2158 ram_addr_t *ram_addr, bool *read_only,
2159 bool *mr_has_discard_manager)
2160 {
2161 MemoryRegion *mr;
2162 hwaddr xlat;
2163 hwaddr len = iotlb->addr_mask + 1;
2164 bool writable = iotlb->perm & IOMMU_WO;
2165
2166 if (mr_has_discard_manager) {
2167 *mr_has_discard_manager = false;
2168 }
2169 /*
2170 * The IOMMU TLB entry we have just covers translation through
2171 * this IOMMU to its immediate target. We need to translate
2172 * it the rest of the way through to memory.
2173 */
2174 mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
2175 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
2176 if (!memory_region_is_ram(mr)) {
2177 error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
2178 return false;
2179 } else if (memory_region_has_ram_discard_manager(mr)) {
2180 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
2181 MemoryRegionSection tmp = {
2182 .mr = mr,
2183 .offset_within_region = xlat,
2184 .size = int128_make64(len),
2185 };
2186 if (mr_has_discard_manager) {
2187 *mr_has_discard_manager = true;
2188 }
2189 /*
2190 * Malicious VMs can map memory into the IOMMU, which is expected
2191 * to remain discarded. vfio will pin all pages, populating memory.
2192 * Disallow that. vmstate priorities make sure that any RamDiscardManager
2193 * was already restored before IOMMUs are restored.
2194 */
2195 if (!ram_discard_manager_is_populated(rdm, &tmp)) {
2196 error_report("iommu map to discarded memory (e.g., unplugged via"
2197 " virtio-mem): %" HWADDR_PRIx "",
2198 iotlb->translated_addr);
2199 return false;
2200 }
2201 }
2202
2203 /*
2204 * Translation truncates length to the IOMMU page size,
2205 * check that it did not truncate too much.
2206 */
2207 if (len & iotlb->addr_mask) {
2208 error_report("iommu has granularity incompatible with target AS");
2209 return false;
2210 }
2211
2212 if (vaddr) {
2213 *vaddr = memory_region_get_ram_ptr(mr) + xlat;
2214 }
2215
2216 if (ram_addr) {
2217 *ram_addr = memory_region_get_ram_addr(mr) + xlat;
2218 }
2219
2220 if (read_only) {
2221 *read_only = !writable || mr->readonly;
2222 }
2223
2224 return true;
2225 }
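/*
 * Illustrative sketch: callers hold the RCU read lock and use the helper
 * above to turn an IOMMU translation into a host pointer they can map or
 * pin. The iotlb pointer is assumed to come from an IOMMU notifier.
 *
 *     void *vaddr;
 *     bool read_only;
 *
 *     RCU_READ_LOCK_GUARD();
 *     if (memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
 *         // vaddr now points at the host memory backing the translation
 *     }
 */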
2226
2227 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2228 {
2229 uint8_t mask = 1 << client;
2230 uint8_t old_logging;
2231
2232 assert(client == DIRTY_MEMORY_VGA);
2233 old_logging = mr->vga_logging_count;
2234 mr->vga_logging_count += log ? 1 : -1;
2235 if (!!old_logging == !!mr->vga_logging_count) {
2236 return;
2237 }
2238
2239 memory_region_transaction_begin();
2240 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2241 memory_region_update_pending |= mr->enabled;
2242 memory_region_transaction_commit();
2243 }
2244
2245 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2246 hwaddr size)
2247 {
2248 assert(mr->ram_block);
2249 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2250 size,
2251 memory_region_get_dirty_log_mask(mr));
2252 }
2253
2254 /*
2255 * If memory region `mr' is NULL, do a global sync. Otherwise, sync
2256 * the dirty bitmap for the specified memory region.
2257 */
2258 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
2259 {
2260 MemoryListener *listener;
2261 AddressSpace *as;
2262 FlatView *view;
2263 FlatRange *fr;
2264
2265 /* If the same address space has multiple log_sync listeners, we
2266 * visit that address space's FlatView multiple times. But because
2267 * log_sync listeners are rare, this is still cheaper than walking
2268 * every address space unconditionally.
2269 */
2270 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2271 if (listener->log_sync) {
2272 as = listener->address_space;
2273 view = address_space_get_flatview(as);
2274 FOR_EACH_FLAT_RANGE(fr, view) {
2275 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2276 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2277 listener->log_sync(listener, &mrs);
2278 }
2279 }
2280 flatview_unref(view);
2281 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
2282 } else if (listener->log_sync_global) {
2283 /*
2284 * Whether or not an MR is specified, all we can do here
2285 * is a global sync, because this listener is not capable
2286 * of syncing at a finer granularity.
2287 */
2288 listener->log_sync_global(listener, last_stage);
2289 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
2290 }
2291 }
2292 }
2293
2294 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2295 hwaddr len)
2296 {
2297 MemoryRegionSection mrs;
2298 MemoryListener *listener;
2299 AddressSpace *as;
2300 FlatView *view;
2301 FlatRange *fr;
2302 hwaddr sec_start, sec_end, sec_size;
2303
2304 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2305 if (!listener->log_clear) {
2306 continue;
2307 }
2308 as = listener->address_space;
2309 view = address_space_get_flatview(as);
2310 FOR_EACH_FLAT_RANGE(fr, view) {
2311 if (!fr->dirty_log_mask || fr->mr != mr) {
2312 /*
2313 * The clear-dirty-bitmap operation only applies to
2314 * regions that have dirty logging enabled
2315 */
2316 continue;
2317 }
2318
2319 mrs = section_from_flat_range(fr, view);
2320
2321 sec_start = MAX(mrs.offset_within_region, start);
2322 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2323 sec_end = MIN(sec_end, start + len);
2324
2325 if (sec_start >= sec_end) {
2326 /*
2327 * If this memory region section has no intersection
2328 * with the requested range, skip.
2329 */
2330 continue;
2331 }
2332
2333 /* Valid case; shrink the section if needed */
2334 mrs.offset_within_address_space +=
2335 sec_start - mrs.offset_within_region;
2336 mrs.offset_within_region = sec_start;
2337 sec_size = sec_end - sec_start;
2338 mrs.size = int128_make64(sec_size);
2339 listener->log_clear(listener, &mrs);
2340 }
2341 flatview_unref(view);
2342 }
2343 }
2344
2345 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2346 hwaddr addr,
2347 hwaddr size,
2348 unsigned client)
2349 {
2350 DirtyBitmapSnapshot *snapshot;
2351 assert(mr->ram_block);
2352 memory_region_sync_dirty_bitmap(mr, false);
2353 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2354 memory_global_after_dirty_log_sync();
2355 return snapshot;
2356 }
2357
2358 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2359 hwaddr addr, hwaddr size)
2360 {
2361 assert(mr->ram_block);
2362 return cpu_physical_memory_snapshot_get_dirty(snap,
2363 memory_region_get_ram_addr(mr) + addr, size);
2364 }
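/*
 * Illustrative sketch of the snapshot pattern used by display code: take a
 * consistent snapshot of the dirty bitmap, then query sub-ranges from it
 * while new guest writes keep being tracked. The snapshot is released with
 * g_free(). The ofs/len values are placeholders.
 *
 *     DirtyBitmapSnapshot *snap;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(mr, 0,
 *                                                   memory_region_size(mr),
 *                                                   DIRTY_MEMORY_VGA);
 *     if (memory_region_snapshot_get_dirty(mr, snap, ofs, len)) {
 *         // redraw the affected area
 *     }
 *     g_free(snap);
 */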
2365
2366 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2367 {
2368 if (mr->readonly != readonly) {
2369 memory_region_transaction_begin();
2370 mr->readonly = readonly;
2371 memory_region_update_pending |= mr->enabled;
2372 memory_region_transaction_commit();
2373 }
2374 }
2375
2376 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2377 {
2378 if (mr->nonvolatile != nonvolatile) {
2379 memory_region_transaction_begin();
2380 mr->nonvolatile = nonvolatile;
2381 memory_region_update_pending |= mr->enabled;
2382 memory_region_transaction_commit();
2383 }
2384 }
2385
2386 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2387 {
2388 if (mr->romd_mode != romd_mode) {
2389 memory_region_transaction_begin();
2390 mr->romd_mode = romd_mode;
2391 memory_region_update_pending |= mr->enabled;
2392 memory_region_transaction_commit();
2393 }
2394 }
2395
2396 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2397 hwaddr size, unsigned client)
2398 {
2399 assert(mr->ram_block);
2400 cpu_physical_memory_test_and_clear_dirty(
2401 memory_region_get_ram_addr(mr) + addr, size, client);
2402 }
2403
2404 int memory_region_get_fd(MemoryRegion *mr)
2405 {
2406 RCU_READ_LOCK_GUARD();
2407 while (mr->alias) {
2408 mr = mr->alias;
2409 }
2410 return mr->ram_block->fd;
2411 }
2412
2413 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2414 {
2415 uint64_t offset = 0;
2416
2417 RCU_READ_LOCK_GUARD();
2418 while (mr->alias) {
2419 offset += mr->alias_offset;
2420 mr = mr->alias;
2421 }
2422 assert(mr->ram_block);
2423 return qemu_map_ram_ptr(mr->ram_block, offset);
2424 }
2425
2426 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2427 {
2428 RAMBlock *block;
2429
2430 block = qemu_ram_block_from_host(ptr, false, offset);
2431 if (!block) {
2432 return NULL;
2433 }
2434
2435 return block->mr;
2436 }
2437
2438 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2439 {
2440 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2441 }
2442
2443 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2444 {
2445 assert(mr->ram_block);
2446
2447 qemu_ram_resize(mr->ram_block, newsize, errp);
2448 }
2449
2450 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2451 {
2452 if (mr->ram_block) {
2453 qemu_ram_msync(mr->ram_block, addr, size);
2454 }
2455 }
2456
2457 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
2458 {
2459 /*
2460 * Might need to be extended to cover
2461 * other types of memory regions
2462 */
2463 if (mr->dirty_log_mask) {
2464 memory_region_msync(mr, addr, size);
2465 }
2466 }
2467
2468 /*
2469 * Notify the relevant memory listeners of the change for the newly
2470 * added/removed CoalescedMemoryRange.
2471 */
2472 static void memory_region_update_coalesced_range(MemoryRegion *mr,
2473 CoalescedMemoryRange *cmr,
2474 bool add)
2475 {
2476 AddressSpace *as;
2477 FlatView *view;
2478 FlatRange *fr;
2479
2480 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2481 view = address_space_get_flatview(as);
2482 FOR_EACH_FLAT_RANGE(fr, view) {
2483 if (fr->mr == mr) {
2484 flat_range_coalesced_io_notify(fr, as, cmr, add);
2485 }
2486 }
2487 flatview_unref(view);
2488 }
2489 }
2490
2491 void memory_region_set_coalescing(MemoryRegion *mr)
2492 {
2493 memory_region_clear_coalescing(mr);
2494 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2495 }
2496
2497 void memory_region_add_coalescing(MemoryRegion *mr,
2498 hwaddr offset,
2499 uint64_t size)
2500 {
2501 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2502
2503 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2504 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2505 memory_region_update_coalesced_range(mr, cmr, true);
2506 memory_region_set_flush_coalesced(mr);
2507 }
2508
2509 void memory_region_clear_coalescing(MemoryRegion *mr)
2510 {
2511 CoalescedMemoryRange *cmr;
2512
2513 if (QTAILQ_EMPTY(&mr->coalesced)) {
2514 return;
2515 }
2516
2517 qemu_flush_coalesced_mmio_buffer();
2518 mr->flush_coalesced_mmio = false;
2519
2520 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2521 cmr = QTAILQ_FIRST(&mr->coalesced);
2522 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2523 memory_region_update_coalesced_range(mr, cmr, false);
2524 g_free(cmr);
2525 }
2526 }
2527
2528 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2529 {
2530 mr->flush_coalesced_mmio = true;
2531 }
2532
2533 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2534 {
2535 qemu_flush_coalesced_mmio_buffer();
2536 if (QTAILQ_EMPTY(&mr->coalesced)) {
2537 mr->flush_coalesced_mmio = false;
2538 }
2539 }
2540
2541 static bool userspace_eventfd_warning;
2542
2543 void memory_region_add_eventfd(MemoryRegion *mr,
2544 hwaddr addr,
2545 unsigned size,
2546 bool match_data,
2547 uint64_t data,
2548 EventNotifier *e)
2549 {
2550 MemoryRegionIoeventfd mrfd = {
2551 .addr.start = int128_make64(addr),
2552 .addr.size = int128_make64(size),
2553 .match_data = match_data,
2554 .data = data,
2555 .e = e,
2556 };
2557 unsigned i;
2558
2559 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2560 userspace_eventfd_warning))) {
2561 userspace_eventfd_warning = true;
2562 error_report("Using eventfd without MMIO binding in KVM. "
2563 "Suboptimal performance expected");
2564 }
2565
2566 if (size) {
2567 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2568 }
2569 memory_region_transaction_begin();
2570 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2571 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2572 break;
2573 }
2574 }
2575 ++mr->ioeventfd_nb;
2576 mr->ioeventfds = g_realloc(mr->ioeventfds,
2577 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2578 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2579 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2580 mr->ioeventfds[i] = mrfd;
2581 ioeventfd_update_pending |= mr->enabled;
2582 memory_region_transaction_commit();
2583 }
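/*
 * Illustrative sketch: a device can bind an EventNotifier to a doorbell
 * register so that a guest write of a specific value wakes an I/O thread
 * without a full MMIO exit. DOORBELL_OFFSET and the s structure are
 * hypothetical.
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
 *                               true, 1, &s->doorbell);
 *     // ... later, tear it down with matching arguments:
 *     memory_region_del_eventfd(&s->mmio, DOORBELL_OFFSET, 4,
 *                               true, 1, &s->doorbell);
 */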
2584
2585 void memory_region_del_eventfd(MemoryRegion *mr,
2586 hwaddr addr,
2587 unsigned size,
2588 bool match_data,
2589 uint64_t data,
2590 EventNotifier *e)
2591 {
2592 MemoryRegionIoeventfd mrfd = {
2593 .addr.start = int128_make64(addr),
2594 .addr.size = int128_make64(size),
2595 .match_data = match_data,
2596 .data = data,
2597 .e = e,
2598 };
2599 unsigned i;
2600
2601 if (size) {
2602 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2603 }
2604 memory_region_transaction_begin();
2605 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2606 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2607 break;
2608 }
2609 }
2610 assert(i != mr->ioeventfd_nb);
2611 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2612 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2613 --mr->ioeventfd_nb;
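    /*
     * The '+ 1' below keeps the allocation non-empty when the last eventfd
     * is removed, so g_realloc() does not free mr->ioeventfds here.
     */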
2614 mr->ioeventfds = g_realloc(mr->ioeventfds,
2615 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2616 ioeventfd_update_pending |= mr->enabled;
2617 memory_region_transaction_commit();
2618 }
2619
2620 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2621 {
2622 MemoryRegion *mr = subregion->container;
2623 MemoryRegion *other;
2624
2625 memory_region_transaction_begin();
2626
2627 memory_region_ref(subregion);
2628 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2629 if (subregion->priority >= other->priority) {
2630 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2631 goto done;
2632 }
2633 }
2634 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2635 done:
2636 memory_region_update_pending |= mr->enabled && subregion->enabled;
2637 memory_region_transaction_commit();
2638 }
2639
2640 static void memory_region_add_subregion_common(MemoryRegion *mr,
2641 hwaddr offset,
2642 MemoryRegion *subregion)
2643 {
2644 MemoryRegion *alias;
2645
2646 assert(!subregion->container);
2647 subregion->container = mr;
2648 for (alias = subregion->alias; alias; alias = alias->alias) {
2649 alias->mapped_via_alias++;
2650 }
2651 subregion->addr = offset;
2652 memory_region_update_container_subregions(subregion);
2653 }
2654
2655 void memory_region_add_subregion(MemoryRegion *mr,
2656 hwaddr offset,
2657 MemoryRegion *subregion)
2658 {
2659 subregion->priority = 0;
2660 memory_region_add_subregion_common(mr, offset, subregion);
2661 }
2662
2663 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2664 hwaddr offset,
2665 MemoryRegion *subregion,
2666 int priority)
2667 {
2668 subregion->priority = priority;
2669 memory_region_add_subregion_common(mr, offset, subregion);
2670 }
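/*
 * Illustrative sketch: overlapping subregions are resolved by priority, so
 * a small "window" region can shadow part of a larger background region in
 * the same container. The region and owner names are placeholders.
 *
 *     memory_region_init(&s->container, OBJECT(s), "bar0", 0x10000);
 *     memory_region_add_subregion(&s->container, 0, &s->background);
 *     memory_region_add_subregion_overlap(&s->container, 0x1000,
 *                                         &s->window, 1);
 */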
2671
2672 void memory_region_del_subregion(MemoryRegion *mr,
2673 MemoryRegion *subregion)
2674 {
2675 MemoryRegion *alias;
2676
2677 memory_region_transaction_begin();
2678 assert(subregion->container == mr);
2679 subregion->container = NULL;
2680 for (alias = subregion->alias; alias; alias = alias->alias) {
2681 alias->mapped_via_alias--;
2682 assert(alias->mapped_via_alias >= 0);
2683 }
2684 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2685 memory_region_unref(subregion);
2686 memory_region_update_pending |= mr->enabled && subregion->enabled;
2687 memory_region_transaction_commit();
2688 }
2689
2690 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2691 {
2692 if (enabled == mr->enabled) {
2693 return;
2694 }
2695 memory_region_transaction_begin();
2696 mr->enabled = enabled;
2697 memory_region_update_pending = true;
2698 memory_region_transaction_commit();
2699 }
2700
2701 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2702 {
2703 Int128 s = int128_make64(size);
2704
2705 if (size == UINT64_MAX) {
2706 s = int128_2_64();
2707 }
2708 if (int128_eq(s, mr->size)) {
2709 return;
2710 }
2711 memory_region_transaction_begin();
2712 mr->size = s;
2713 memory_region_update_pending = true;
2714 memory_region_transaction_commit();
2715 }
2716
2717 static void memory_region_readd_subregion(MemoryRegion *mr)
2718 {
2719 MemoryRegion *container = mr->container;
2720
2721 if (container) {
2722 memory_region_transaction_begin();
2723 memory_region_ref(mr);
2724 memory_region_del_subregion(container, mr);
2725 memory_region_add_subregion_common(container, mr->addr, mr);
2726 memory_region_unref(mr);
2727 memory_region_transaction_commit();
2728 }
2729 }
2730
2731 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2732 {
2733 if (addr != mr->addr) {
2734 mr->addr = addr;
2735 memory_region_readd_subregion(mr);
2736 }
2737 }
2738
2739 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2740 {
2741 assert(mr->alias);
2742
2743 if (offset == mr->alias_offset) {
2744 return;
2745 }
2746
2747 memory_region_transaction_begin();
2748 mr->alias_offset = offset;
2749 memory_region_update_pending |= mr->enabled;
2750 memory_region_transaction_commit();
2751 }
2752
2753 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2754 {
2755 return mr->align;
2756 }
2757
2758 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2759 {
2760 const AddrRange *addr = addr_;
2761 const FlatRange *fr = fr_;
2762
2763 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2764 return -1;
2765 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2766 return 1;
2767 }
2768 return 0;
2769 }
2770
2771 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2772 {
2773 return bsearch(&addr, view->ranges, view->nr,
2774 sizeof(FlatRange), cmp_flatrange_addr);
2775 }
2776
2777 bool memory_region_is_mapped(MemoryRegion *mr)
2778 {
2779 return !!mr->container || mr->mapped_via_alias;
2780 }
2781
2782 /* Same as memory_region_find, but it does not add a reference to the
2783 * returned region. It must be called from an RCU critical section.
2784 */
2785 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2786 hwaddr addr, uint64_t size)
2787 {
2788 MemoryRegionSection ret = { .mr = NULL };
2789 MemoryRegion *root;
2790 AddressSpace *as;
2791 AddrRange range;
2792 FlatView *view;
2793 FlatRange *fr;
2794
2795 addr += mr->addr;
2796 for (root = mr; root->container; ) {
2797 root = root->container;
2798 addr += root->addr;
2799 }
2800
2801 as = memory_region_to_address_space(root);
2802 if (!as) {
2803 return ret;
2804 }
2805 range = addrrange_make(int128_make64(addr), int128_make64(size));
2806
2807 view = address_space_to_flatview(as);
2808 fr = flatview_lookup(view, range);
2809 if (!fr) {
2810 return ret;
2811 }
2812
2813 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2814 --fr;
2815 }
2816
2817 ret.mr = fr->mr;
2818 ret.fv = view;
2819 range = addrrange_intersection(range, fr->addr);
2820 ret.offset_within_region = fr->offset_in_region;
2821 ret.offset_within_region += int128_get64(int128_sub(range.start,
2822 fr->addr.start));
2823 ret.size = range.size;
2824 ret.offset_within_address_space = int128_get64(range.start);
2825 ret.readonly = fr->readonly;
2826 ret.nonvolatile = fr->nonvolatile;
2827 return ret;
2828 }
2829
2830 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2831 hwaddr addr, uint64_t size)
2832 {
2833 MemoryRegionSection ret;
2834 RCU_READ_LOCK_GUARD();
2835 ret = memory_region_find_rcu(mr, addr, size);
2836 if (ret.mr) {
2837 memory_region_ref(ret.mr);
2838 }
2839 return ret;
2840 }
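/*
 * Illustrative sketch: memory_region_find() takes a reference on the region
 * it returns, so the caller drops it once done with the section.
 *
 *     MemoryRegionSection section;
 *
 *     section = memory_region_find(get_system_memory(), addr, 4);
 *     if (section.mr) {
 *         // inspect section.offset_within_region, section.size, ...
 *         memory_region_unref(section.mr);
 *     }
 */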
2841
2842 MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
2843 {
2844 MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);
2845
2846 *tmp = *s;
2847 if (tmp->mr) {
2848 memory_region_ref(tmp->mr);
2849 }
2850 if (tmp->fv) {
2851 bool ret = flatview_ref(tmp->fv);
2852
2853 g_assert(ret);
2854 }
2855 return tmp;
2856 }
2857
2858 void memory_region_section_free_copy(MemoryRegionSection *s)
2859 {
2860 if (s->fv) {
2861 flatview_unref(s->fv);
2862 }
2863 if (s->mr) {
2864 memory_region_unref(s->mr);
2865 }
2866 g_free(s);
2867 }
2868
2869 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2870 {
2871 MemoryRegion *mr;
2872
2873 RCU_READ_LOCK_GUARD();
2874 mr = memory_region_find_rcu(container, addr, 1).mr;
2875 return mr && mr != container;
2876 }
2877
2878 void memory_global_dirty_log_sync(bool last_stage)
2879 {
2880 memory_region_sync_dirty_bitmap(NULL, last_stage);
2881 }
2882
2883 void memory_global_after_dirty_log_sync(void)
2884 {
2885 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2886 }
2887
2888 /*
2889 * Dirty tracking stop flags that are postponed because the VM is stopped.
2890 * Should only be used within the vmstate_change hook.
2891 */
2892 static unsigned int postponed_stop_flags;
2893 static VMChangeStateEntry *vmstate_change;
2894 static void memory_global_dirty_log_stop_postponed_run(void);
2895
2896 void memory_global_dirty_log_start(unsigned int flags)
2897 {
2898 unsigned int old_flags;
2899
2900 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
2901
2902 if (vmstate_change) {
2903 /* If there is a postponed stop(), handle it first */
2904 postponed_stop_flags &= ~flags;
2905 memory_global_dirty_log_stop_postponed_run();
2906 }
2907
2908 flags &= ~global_dirty_tracking;
2909 if (!flags) {
2910 return;
2911 }
2912
2913 old_flags = global_dirty_tracking;
2914 global_dirty_tracking |= flags;
2915 trace_global_dirty_changed(global_dirty_tracking);
2916
2917 if (!old_flags) {
2918 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2919 memory_region_transaction_begin();
2920 memory_region_update_pending = true;
2921 memory_region_transaction_commit();
2922 }
2923 }
2924
2925 static void memory_global_dirty_log_do_stop(unsigned int flags)
2926 {
2927 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
2928 assert((global_dirty_tracking & flags) == flags);
2929 global_dirty_tracking &= ~flags;
2930
2931 trace_global_dirty_changed(global_dirty_tracking);
2932
2933 if (!global_dirty_tracking) {
2934 memory_region_transaction_begin();
2935 memory_region_update_pending = true;
2936 memory_region_transaction_commit();
2937 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2938 }
2939 }
2940
2941 /*
2942 * Execute the postponed dirty log stop operations, if any, then reset
2943 * everything (including the flags and the vmstate change hook).
2944 */
2945 static void memory_global_dirty_log_stop_postponed_run(void)
2946 {
2947 /* This must be called with the vmstate handler registered */
2948 assert(vmstate_change);
2949
2950 /* Note: postponed_stop_flags can be cleared in the log start routine */
2951 if (postponed_stop_flags) {
2952 memory_global_dirty_log_do_stop(postponed_stop_flags);
2953 postponed_stop_flags = 0;
2954 }
2955
2956 qemu_del_vm_change_state_handler(vmstate_change);
2957 vmstate_change = NULL;
2958 }
2959
2960 static void memory_vm_change_state_handler(void *opaque, bool running,
2961 RunState state)
2962 {
2963 if (running) {
2964 memory_global_dirty_log_stop_postponed_run();
2965 }
2966 }
2967
2968 void memory_global_dirty_log_stop(unsigned int flags)
2969 {
2970 if (!runstate_is_running()) {
2971 /* Postpone the dirty log stop, e.g., to when VM starts again */
2972 if (vmstate_change) {
2973 /* Batch with previous postponed flags */
2974 postponed_stop_flags |= flags;
2975 } else {
2976 postponed_stop_flags = flags;
2977 vmstate_change = qemu_add_vm_change_state_handler(
2978 memory_vm_change_state_handler, NULL);
2979 }
2980 return;
2981 }
2982
2983 memory_global_dirty_log_do_stop(flags);
2984 }
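/*
 * Illustrative sketch: migration-style users bracket dirty tracking with
 * matching start/stop calls and sync the bitmaps in between.
 *
 *     memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
 *     // per iteration: refresh the dirty information, then transfer pages
 *     memory_global_dirty_log_sync(false);
 *     memory_global_after_dirty_log_sync();
 *     // when finished:
 *     memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
 */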
2985
2986 static void listener_add_address_space(MemoryListener *listener,
2987 AddressSpace *as)
2988 {
2989 FlatView *view;
2990 FlatRange *fr;
2991
2992 if (listener->begin) {
2993 listener->begin(listener);
2994 }
2995 if (global_dirty_tracking) {
2996 if (listener->log_global_start) {
2997 listener->log_global_start(listener);
2998 }
2999 }
3000
3001 view = address_space_get_flatview(as);
3002 FOR_EACH_FLAT_RANGE(fr, view) {
3003 MemoryRegionSection section = section_from_flat_range(fr, view);
3004
3005 if (listener->region_add) {
3006 listener->region_add(listener, &section);
3007 }
3008 if (fr->dirty_log_mask && listener->log_start) {
3009 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
3010 }
3011 }
3012 if (listener->commit) {
3013 listener->commit(listener);
3014 }
3015 flatview_unref(view);
3016 }
3017
3018 static void listener_del_address_space(MemoryListener *listener,
3019 AddressSpace *as)
3020 {
3021 FlatView *view;
3022 FlatRange *fr;
3023
3024 if (listener->begin) {
3025 listener->begin(listener);
3026 }
3027 view = address_space_get_flatview(as);
3028 FOR_EACH_FLAT_RANGE(fr, view) {
3029 MemoryRegionSection section = section_from_flat_range(fr, view);
3030
3031 if (fr->dirty_log_mask && listener->log_stop) {
3032 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
3033 }
3034 if (listener->region_del) {
3035 listener->region_del(listener, &section);
3036 }
3037 }
3038 if (listener->commit) {
3039 listener->commit(listener);
3040 }
3041 flatview_unref(view);
3042 }
3043
3044 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
3045 {
3046 MemoryListener *other = NULL;
3047
3048 /* Only one of log_sync and log_sync_global may be defined for a listener */
3049 assert(!(listener->log_sync && listener->log_sync_global));
3050
3051 listener->address_space = as;
3052 if (QTAILQ_EMPTY(&memory_listeners)
3053 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
3054 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
3055 } else {
3056 QTAILQ_FOREACH(other, &memory_listeners, link) {
3057 if (listener->priority < other->priority) {
3058 break;
3059 }
3060 }
3061 QTAILQ_INSERT_BEFORE(other, listener, link);
3062 }
3063
3064 if (QTAILQ_EMPTY(&as->listeners)
3065 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
3066 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
3067 } else {
3068 QTAILQ_FOREACH(other, &as->listeners, link_as) {
3069 if (listener->priority < other->priority) {
3070 break;
3071 }
3072 }
3073 QTAILQ_INSERT_BEFORE(other, listener, link_as);
3074 }
3075
3076 listener_add_address_space(listener, as);
3077 }
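/*
 * Illustrative sketch: a component that mirrors the guest physical memory
 * layout fills in a MemoryListener and registers it against an address
 * space; region_add is replayed immediately for the existing ranges. The
 * callback names are placeholders.
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-component",
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *     // ... and on teardown:
 *     memory_listener_unregister(&my_listener);
 */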
3078
3079 void memory_listener_unregister(MemoryListener *listener)
3080 {
3081 if (!listener->address_space) {
3082 return;
3083 }
3084
3085 listener_del_address_space(listener, listener->address_space);
3086 QTAILQ_REMOVE(&memory_listeners, listener, link);
3087 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
3088 listener->address_space = NULL;
3089 }
3090
3091 void address_space_remove_listeners(AddressSpace *as)
3092 {
3093 while (!QTAILQ_EMPTY(&as->listeners)) {
3094 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
3095 }
3096 }
3097
3098 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3099 {
3100 memory_region_ref(root);
3101 as->root = root;
3102 as->current_map = NULL;
3103 as->ioeventfd_nb = 0;
3104 as->ioeventfds = NULL;
3105 QTAILQ_INIT(&as->listeners);
3106 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3107 as->name = g_strdup(name ? name : "anonymous");
3108 address_space_update_topology(as);
3109 address_space_update_ioeventfds(as);
3110 }
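/*
 * Illustrative sketch: a device that performs DMA through a bus-specific
 * view of memory wraps its root region in an address space of its own.
 * The s structure and region names are hypothetical.
 *
 *     address_space_init(&s->dma_as, &s->dma_root, "mydev-dma");
 *     // ... issue accesses via address_space_rw(&s->dma_as, ...) ...
 *     address_space_destroy(&s->dma_as);
 */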
3111
3112 static void do_address_space_destroy(AddressSpace *as)
3113 {
3114 assert(QTAILQ_EMPTY(&as->listeners));
3115
3116 flatview_unref(as->current_map);
3117 g_free(as->name);
3118 g_free(as->ioeventfds);
3119 memory_region_unref(as->root);
3120 }
3121
3122 void address_space_destroy(AddressSpace *as)
3123 {
3124 MemoryRegion *root = as->root;
3125
3126 /* Flush out anything from MemoryListeners listening in on this */
3127 memory_region_transaction_begin();
3128 as->root = NULL;
3129 memory_region_transaction_commit();
3130 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3131
3132 /* At this point, as->dispatch and as->current_map are dummy
3133 * entries that the guest should never use. Wait for the old
3134 * values to expire before freeing the data.
3135 */
3136 as->root = root;
3137 call_rcu(as, do_address_space_destroy, rcu);
3138 }
3139
3140 static const char *memory_region_type(MemoryRegion *mr)
3141 {
3142 if (mr->alias) {
3143 return memory_region_type(mr->alias);
3144 }
3145 if (memory_region_is_ram_device(mr)) {
3146 return "ramd";
3147 } else if (memory_region_is_romd(mr)) {
3148 return "romd";
3149 } else if (memory_region_is_rom(mr)) {
3150 return "rom";
3151 } else if (memory_region_is_ram(mr)) {
3152 return "ram";
3153 } else {
3154 return "i/o";
3155 }
3156 }
3157
3158 typedef struct MemoryRegionList MemoryRegionList;
3159
3160 struct MemoryRegionList {
3161 const MemoryRegion *mr;
3162 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3163 };
3164
3165 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
3166
3167 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3168 int128_sub((size), int128_one())) : 0)
3169 #define MTREE_INDENT " "
3170
3171 static void mtree_expand_owner(const char *label, Object *obj)
3172 {
3173 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
3174
3175 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
3176 if (dev && dev->id) {
3177 qemu_printf(" id=%s", dev->id);
3178 } else {
3179 char *canonical_path = object_get_canonical_path(obj);
3180 if (canonical_path) {
3181 qemu_printf(" path=%s", canonical_path);
3182 g_free(canonical_path);
3183 } else {
3184 qemu_printf(" type=%s", object_get_typename(obj));
3185 }
3186 }
3187 qemu_printf("}");
3188 }
3189
3190 static void mtree_print_mr_owner(const MemoryRegion *mr)
3191 {
3192 Object *owner = mr->owner;
3193 Object *parent = memory_region_owner((MemoryRegion *)mr);
3194
3195 if (!owner && !parent) {
3196 qemu_printf(" orphan");
3197 return;
3198 }
3199 if (owner) {
3200 mtree_expand_owner("owner", owner);
3201 }
3202 if (parent && parent != owner) {
3203 mtree_expand_owner("parent", parent);
3204 }
3205 }
3206
3207 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
3208 hwaddr base,
3209 MemoryRegionListHead *alias_print_queue,
3210 bool owner, bool display_disabled)
3211 {
3212 MemoryRegionList *new_ml, *ml, *next_ml;
3213 MemoryRegionListHead submr_print_queue;
3214 const MemoryRegion *submr;
3215 unsigned int i;
3216 hwaddr cur_start, cur_end;
3217
3218 if (!mr) {
3219 return;
3220 }
3221
3222 cur_start = base + mr->addr;
3223 cur_end = cur_start + MR_SIZE(mr->size);
3224
3225 /*
3226 * Try to detect memory region address overflow. This should never
3227 * happen under normal circumstances; when it does, print a marker
3228 * to warn the user observing the output.
3229 */
3230 if (cur_start < base || cur_end < cur_start) {
3231 qemu_printf("[DETECTED OVERFLOW!] ");
3232 }
3233
3234 if (mr->alias) {
3235 MemoryRegionList *ml;
3236 bool found = false;
3237
3238 /* check if the alias is already in the queue */
3239 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3240 if (ml->mr == mr->alias) {
3241 found = true;
3242 }
3243 }
3244
3245 if (!found) {
3246 ml = g_new(MemoryRegionList, 1);
3247 ml->mr = mr->alias;
3248 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3249 }
3250 if (mr->enabled || display_disabled) {
3251 for (i = 0; i < level; i++) {
3252 qemu_printf(MTREE_INDENT);
3253 }
3254 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3255 " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
3256 "-" HWADDR_FMT_plx "%s",
3257 cur_start, cur_end,
3258 mr->priority,
3259 mr->nonvolatile ? "nv-" : "",
3260 memory_region_type((MemoryRegion *)mr),
3261 memory_region_name(mr),
3262 memory_region_name(mr->alias),
3263 mr->alias_offset,
3264 mr->alias_offset + MR_SIZE(mr->size),
3265 mr->enabled ? "" : " [disabled]");
3266 if (owner) {
3267 mtree_print_mr_owner(mr);
3268 }
3269 qemu_printf("\n");
3270 }
3271 } else {
3272 if (mr->enabled || display_disabled) {
3273 for (i = 0; i < level; i++) {
3274 qemu_printf(MTREE_INDENT);
3275 }
3276 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3277 " (prio %d, %s%s): %s%s",
3278 cur_start, cur_end,
3279 mr->priority,
3280 mr->nonvolatile ? "nv-" : "",
3281 memory_region_type((MemoryRegion *)mr),
3282 memory_region_name(mr),
3283 mr->enabled ? "" : " [disabled]");
3284 if (owner) {
3285 mtree_print_mr_owner(mr);
3286 }
3287 qemu_printf("\n");
3288 }
3289 }
3290
3291 QTAILQ_INIT(&submr_print_queue);
3292
3293 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3294 new_ml = g_new(MemoryRegionList, 1);
3295 new_ml->mr = submr;
3296 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3297 if (new_ml->mr->addr < ml->mr->addr ||
3298 (new_ml->mr->addr == ml->mr->addr &&
3299 new_ml->mr->priority > ml->mr->priority)) {
3300 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3301 new_ml = NULL;
3302 break;
3303 }
3304 }
3305 if (new_ml) {
3306 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3307 }
3308 }
3309
3310 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3311 mtree_print_mr(ml->mr, level + 1, cur_start,
3312 alias_print_queue, owner, display_disabled);
3313 }
3314
3315 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3316 g_free(ml);
3317 }
3318 }
3319
3320 struct FlatViewInfo {
3321 int counter;
3322 bool dispatch_tree;
3323 bool owner;
3324 AccelClass *ac;
3325 };
3326
3327 static void mtree_print_flatview(gpointer key, gpointer value,
3328 gpointer user_data)
3329 {
3330 FlatView *view = key;
3331 GArray *fv_address_spaces = value;
3332 struct FlatViewInfo *fvi = user_data;
3333 FlatRange *range = &view->ranges[0];
3334 MemoryRegion *mr;
3335 int n = view->nr;
3336 int i;
3337 AddressSpace *as;
3338
3339 qemu_printf("FlatView #%d\n", fvi->counter);
3340 ++fvi->counter;
3341
3342 for (i = 0; i < fv_address_spaces->len; ++i) {
3343 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3344 qemu_printf(" AS \"%s\", root: %s",
3345 as->name, memory_region_name(as->root));
3346 if (as->root->alias) {
3347 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3348 }
3349 qemu_printf("\n");
3350 }
3351
3352 qemu_printf(" Root memory region: %s\n",
3353 view->root ? memory_region_name(view->root) : "(none)");
3354
3355 if (n <= 0) {
3356 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3357 return;
3358 }
3359
3360 while (n--) {
3361 mr = range->mr;
3362 if (range->offset_in_region) {
3363 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3364 " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
3365 int128_get64(range->addr.start),
3366 int128_get64(range->addr.start)
3367 + MR_SIZE(range->addr.size),
3368 mr->priority,
3369 range->nonvolatile ? "nv-" : "",
3370 range->readonly ? "rom" : memory_region_type(mr),
3371 memory_region_name(mr),
3372 range->offset_in_region);
3373 } else {
3374 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3375 " (prio %d, %s%s): %s",
3376 int128_get64(range->addr.start),
3377 int128_get64(range->addr.start)
3378 + MR_SIZE(range->addr.size),
3379 mr->priority,
3380 range->nonvolatile ? "nv-" : "",
3381 range->readonly ? "rom" : memory_region_type(mr),
3382 memory_region_name(mr));
3383 }
3384 if (fvi->owner) {
3385 mtree_print_mr_owner(mr);
3386 }
3387
3388 if (fvi->ac) {
3389 for (i = 0; i < fv_address_spaces->len; ++i) {
3390 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3391 if (fvi->ac->has_memory(current_machine, as,
3392 int128_get64(range->addr.start),
3393 MR_SIZE(range->addr.size) + 1)) {
3394 qemu_printf(" %s", fvi->ac->name);
3395 }
3396 }
3397 }
3398 qemu_printf("\n");
3399 range++;
3400 }
3401
3402 #if !defined(CONFIG_USER_ONLY)
3403 if (fvi->dispatch_tree && view->root) {
3404 mtree_print_dispatch(view->dispatch, view->root);
3405 }
3406 #endif
3407
3408 qemu_printf("\n");
3409 }
3410
3411 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3412 gpointer user_data)
3413 {
3414 FlatView *view = key;
3415 GArray *fv_address_spaces = value;
3416
3417 g_array_unref(fv_address_spaces);
3418 flatview_unref(view);
3419
3420 return true;
3421 }
3422
3423 static void mtree_info_flatview(bool dispatch_tree, bool owner)
3424 {
3425 struct FlatViewInfo fvi = {
3426 .counter = 0,
3427 .dispatch_tree = dispatch_tree,
3428 .owner = owner,
3429 };
3430 AddressSpace *as;
3431 FlatView *view;
3432 GArray *fv_address_spaces;
3433 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3434 AccelClass *ac = ACCEL_GET_CLASS(current_accel());
3435
3436 if (ac->has_memory) {
3437 fvi.ac = ac;
3438 }
3439
3440 /* Gather all FVs in one table */
3441 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3442 view = address_space_get_flatview(as);
3443
3444 fv_address_spaces = g_hash_table_lookup(views, view);
3445 if (!fv_address_spaces) {
3446 fv_address_spaces = g_array_new(false, false, sizeof(as));
3447 g_hash_table_insert(views, view, fv_address_spaces);
3448 }
3449
3450 g_array_append_val(fv_address_spaces, as);
3451 }
3452
3453 /* Print */
3454 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3455
3456 /* Free */
3457 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3458 g_hash_table_unref(views);
3459 }
3460
3461 struct AddressSpaceInfo {
3462 MemoryRegionListHead *ml_head;
3463 bool owner;
3464 bool disabled;
3465 };
3466
3467 /* Returns negative value if a < b; zero if a = b; positive value if a > b. */
3468 static gint address_space_compare_name(gconstpointer a, gconstpointer b)
3469 {
3470 const AddressSpace *as_a = a;
3471 const AddressSpace *as_b = b;
3472
3473 return g_strcmp0(as_a->name, as_b->name);
3474 }
3475
3476 static void mtree_print_as_name(gpointer data, gpointer user_data)
3477 {
3478 AddressSpace *as = data;
3479
3480 qemu_printf("address-space: %s\n", as->name);
3481 }
3482
3483 static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
3484 {
3485 MemoryRegion *mr = key;
3486 GSList *as_same_root_mr_list = value;
3487 struct AddressSpaceInfo *asi = user_data;
3488
3489 g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
3490 mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
3491 qemu_printf("\n");
3492 }
3493
3494 static gboolean mtree_info_as_free(gpointer key, gpointer value,
3495 gpointer user_data)
3496 {
3497 GSList *as_same_root_mr_list = value;
3498
3499 g_slist_free(as_same_root_mr_list);
3500
3501 return true;
3502 }
3503
3504 static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
3505 {
3506 MemoryRegionListHead ml_head;
3507 MemoryRegionList *ml, *ml2;
3508 AddressSpace *as;
3509 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3510 GSList *as_same_root_mr_list;
3511 struct AddressSpaceInfo asi = {
3512 .ml_head = &ml_head,
3513 .owner = owner,
3514 .disabled = disabled,
3515 };
3516
3517 QTAILQ_INIT(&ml_head);
3518
3519 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3520 /* Create hashtable, key=AS root MR, value = list of AS */
3521 as_same_root_mr_list = g_hash_table_lookup(views, as->root);
3522 as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
3523 address_space_compare_name);
3524 g_hash_table_insert(views, as->root, as_same_root_mr_list);
3525 }
3526
3527 /* print address spaces */
3528 g_hash_table_foreach(views, mtree_print_as, &asi);
3529 g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
3530 g_hash_table_unref(views);
3531
3532 /* print aliased regions */
3533 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3534 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3535 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
3536 qemu_printf("\n");
3537 }
3538
3539 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3540 g_free(ml);
3541 }
3542 }
3543
3544 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
3545 {
3546 if (flatview) {
3547 mtree_info_flatview(dispatch_tree, owner);
3548 } else {
3549 mtree_info_as(dispatch_tree, owner, disabled);
3550 }
3551 }
3552
3553 void memory_region_init_ram(MemoryRegion *mr,
3554 Object *owner,
3555 const char *name,
3556 uint64_t size,
3557 Error **errp)
3558 {
3559 DeviceState *owner_dev;
3560 Error *err = NULL;
3561
3562 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3563 if (err) {
3564 error_propagate(errp, err);
3565 return;
3566 }
3567 /* This will assert if owner is neither NULL nor a DeviceState.
3568 * We only want the owner here for the purposes of defining a
3569 * unique name for migration. TODO: Ideally we should implement
3570 * a naming scheme for Objects which are not DeviceStates, in
3571 * which case we can relax this restriction.
3572 */
3573 owner_dev = DEVICE(owner);
3574 vmstate_register_ram(mr, owner_dev);
3575 }
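/*
 * Illustrative sketch: a device realize function creates migratable RAM and
 * maps it into the system address space. The size and base address below
 * are placeholders.
 *
 *     Error *err = NULL;
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            0x10000, &err);
 *     if (err) {
 *         error_propagate(errp, err);
 *         return;
 *     }
 *     memory_region_add_subregion(get_system_memory(), 0xfe000000, &s->ram);
 */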
3576
3577 void memory_region_init_rom(MemoryRegion *mr,
3578 Object *owner,
3579 const char *name,
3580 uint64_t size,
3581 Error **errp)
3582 {
3583 DeviceState *owner_dev;
3584 Error *err = NULL;
3585
3586 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3587 if (err) {
3588 error_propagate(errp, err);
3589 return;
3590 }
3591 /* This will assert if owner is neither NULL nor a DeviceState.
3592 * We only want the owner here for the purposes of defining a
3593 * unique name for migration. TODO: Ideally we should implement
3594 * a naming scheme for Objects which are not DeviceStates, in
3595 * which case we can relax this restriction.
3596 */
3597 owner_dev = DEVICE(owner);
3598 vmstate_register_ram(mr, owner_dev);
3599 }
3600
3601 void memory_region_init_rom_device(MemoryRegion *mr,
3602 Object *owner,
3603 const MemoryRegionOps *ops,
3604 void *opaque,
3605 const char *name,
3606 uint64_t size,
3607 Error **errp)
3608 {
3609 DeviceState *owner_dev;
3610 Error *err = NULL;
3611
3612 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3613 name, size, &err);
3614 if (err) {
3615 error_propagate(errp, err);
3616 return;
3617 }
3618 /* This will assert if owner is neither NULL nor a DeviceState.
3619 * We only want the owner here for the purposes of defining a
3620 * unique name for migration. TODO: Ideally we should implement
3621 * a naming scheme for Objects which are not DeviceStates, in
3622 * which case we can relax this restriction.
3623 */
3624 owner_dev = DEVICE(owner);
3625 vmstate_register_ram(mr, owner_dev);
3626 }
3627
3628 /*
3629 * Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
3630 * the fuzz_dma_read_cb callback
3631 */
3632 #ifdef CONFIG_FUZZ
3633 void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
3634 size_t len,
3635 MemoryRegion *mr)
3636 {
3637 }
3638 #endif
3639
3640 static const TypeInfo memory_region_info = {
3641 .parent = TYPE_OBJECT,
3642 .name = TYPE_MEMORY_REGION,
3643 .class_size = sizeof(MemoryRegionClass),
3644 .instance_size = sizeof(MemoryRegion),
3645 .instance_init = memory_region_initfn,
3646 .instance_finalize = memory_region_finalize,
3647 };
3648
3649 static const TypeInfo iommu_memory_region_info = {
3650 .parent = TYPE_MEMORY_REGION,
3651 .name = TYPE_IOMMU_MEMORY_REGION,
3652 .class_size = sizeof(IOMMUMemoryRegionClass),
3653 .instance_size = sizeof(IOMMUMemoryRegion),
3654 .instance_init = iommu_memory_region_initfn,
3655 .abstract = true,
3656 };
3657
3658 static const TypeInfo ram_discard_manager_info = {
3659 .parent = TYPE_INTERFACE,
3660 .name = TYPE_RAM_DISCARD_MANAGER,
3661 .class_size = sizeof(RamDiscardManagerClass),
3662 };
3663
3664 static void memory_register_types(void)
3665 {
3666 type_register_static(&memory_region_info);
3667 type_register_static(&iommu_memory_region_info);
3668 type_register_static(&ram_discard_manager_info);
3669 }
3670
3671 type_init(memory_register_types)