/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};
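
/*
 * Illustrative example (added comment): when render_memory_region() follows
 * an alias it subtracts the alias target's address and alias_offset from the
 * running base, so e.g. base 0x1000 with alias_offset 0x2000 yields an
 * intermediate start of -0x1000 before clipping.  Signed Int128 arithmetic
 * keeps such intermediate values representable.
 */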

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}
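
/*
 * Added note: the comparator above defines a strict lexicographic order on
 * (start, size, match_data, data when match_data is set, e).
 * memory_region_ioeventfd_equal() below derives equality from it, and
 * address_space_add_del_ioeventfds() relies on this order to walk two
 * sorted fd arrays as a merge.
 */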

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
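
/*
 * Illustrative example (added comment): two FlatRanges over the same mr at
 * [0x0, 0x1000) and [0x1000, 0x2000) with offset_in_region 0x0 and 0x1000
 * and identical dirty_log_mask/romd_mode/readonly satisfy can_merge(), so
 * the loop above collapses them into one [0x0, 0x2000) range and shifts the
 * remaining entries down.
 */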

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
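
/*
 * Added note: "wrong" endianness means the device's declared endianness
 * differs from the target's, so the value is byte-swapped here, e.g. a
 * 2-byte 0x1234 becomes 0x3412.  DEVICE_NATIVE_ENDIAN regions match both
 * tests' negative case and are never swapped.
 */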

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
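
/*
 * Illustrative example (added comment): an 8-byte access to a device whose
 * impl.max_access_size is 4 is split into two 4-byte accesses with
 * access_mask 0xffffffff.  On a little-endian region the chunks land at
 * shifts 0 and 32; on a big-endian region at shifts 32 and 0, so the lower
 * address supplies the more significant bytes there.
 */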

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
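
/*
 * Illustrative example (added comment): a 4K MMIO subregion at offset 0x1000
 * inside a 16K terminating RAM region renders as three FlatRanges: RAM
 * [0x0, 0x1000), MMIO [0x1000, 0x2000), and RAM [0x2000, 0x4000).
 * Subregions are rendered first; the parent then only fills the remaining
 * gaps.
 */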

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
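
/*
 * Added note: walking down through full-size aliases and single enabled
 * children lets address spaces whose roots differ only by such wrappers
 * resolve to the same "real" root, and therefore share one FlatView in the
 * flat_views hash table.
 */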

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
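
/*
 * Illustrative example (added comment): with old = {A, B} and new = {B, C}
 * (sorted by memory_region_ioeventfd_before), the merge above emits
 * eventfd_del for A, silently advances past the common entry B, and emits
 * eventfd_add for C.  Entries equal under the ordering are left untouched.
 */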

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
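
/*
 * Added note: address_space_set_flatview() below runs this twice, first with
 * adding = false so region_del fires (in Reverse order) for ranges that
 * disappear, then with adding = true so region_add/region_nop and the
 * log_start/log_stop transitions fire for the new view.
 */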

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
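
/*
 * Usage sketch (added comment): callers bracket related updates so FlatViews
 * and listeners are refreshed only once, e.g. using setters defined later in
 * this file:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_readonly(mr, true);
 *     memory_region_rom_device_set_romd(mr, false);
 *     memory_region_transaction_commit();
 *
 * Transactions nest; the flush only happens when the depth returns to zero.
 */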

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
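
/*
 * Illustrative example (added comment): "foo/bar" escapes to "foo\x2fbar",
 * since '/', '[', ']' and '\' would otherwise clash with QOM path syntax.
 * Names needing no escaping are returned as a plain copy.
 */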

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
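
/*
 * Usage sketch (added comment; the names and the add_subregion call come
 * from the wider memory API, not this file, and are shown only for
 * illustration): expose a 1M window of a larger RAM region at a second
 * guest-physical address:
 *
 *     memory_region_init_alias(alias, owner, "ram-window", ram_mr,
 *                              0x100000, 0x100000);
 *     memory_region_add_subregion(sysmem, 0xfee00000, alias);
 *
 * The alias has no backing of its own; render_memory_region() redirects
 * accesses to @orig starting at @offset when the view is flattened.
 */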

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
1691 mr->enabled = false;
1692 memory_region_transaction_begin();
1693 while (!QTAILQ_EMPTY(&mr->subregions)) {
1694 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1695 memory_region_del_subregion(mr, subregion);
1696 }
1697 memory_region_transaction_commit();
1698
1699 mr->destructor(mr);
1700 memory_region_clear_coalescing(mr);
1701 g_free((char *)mr->name);
1702 g_free(mr->ioeventfds);
1703 }
1704
1705 Object *memory_region_owner(MemoryRegion *mr)
1706 {
1707 Object *obj = OBJECT(mr);
1708 return obj->parent;
1709 }
1710
1711 void memory_region_ref(MemoryRegion *mr)
1712 {
1713 /* MMIO callbacks most likely will access data that belongs
1714 * to the owner, hence the need to ref/unref the owner whenever
1715 * the memory region is in use.
1716 *
1717 * The memory region is a child of its owner. As long as the
1718 * owner doesn't call unparent itself on the memory region,
1719 * ref-ing the owner will also keep the memory region alive.
1720 * Memory regions without an owner are supposed to never go away;
1721 * we do not ref/unref them because it slows down DMA sensibly.
1722 */
1723 if (mr && mr->owner) {
1724 object_ref(mr->owner);
1725 }
1726 }
1727
1728 void memory_region_unref(MemoryRegion *mr)
1729 {
1730 if (mr && mr->owner) {
1731 object_unref(mr->owner);
1732 }
1733 }
1734
1735 uint64_t memory_region_size(MemoryRegion *mr)
1736 {
1737 if (int128_eq(mr->size, int128_2_64())) {
1738 return UINT64_MAX;
1739 }
1740 return int128_get64(mr->size);
1741 }
1742
1743 const char *memory_region_name(const MemoryRegion *mr)
1744 {
1745 if (!mr->name) {
1746 ((MemoryRegion *)mr)->name =
1747 object_get_canonical_path_component(OBJECT(mr));
1748 }
1749 return mr->name;
1750 }
1751
1752 bool memory_region_is_ram_device(MemoryRegion *mr)
1753 {
1754 return mr->ram_device;
1755 }
1756
1757 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1758 {
1759 uint8_t mask = mr->dirty_log_mask;
1760 if (global_dirty_log && mr->ram_block) {
1761 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1762 }
1763 return mask;
1764 }
1765
1766 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1767 {
1768 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1769 }
1770
1771 static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
1772 {
1773 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1774 IOMMUNotifier *iommu_notifier;
1775 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1776
1777 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1778 flags |= iommu_notifier->notifier_flags;
1779 }
1780
1781 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1782 imrc->notify_flag_changed(iommu_mr,
1783 iommu_mr->iommu_notify_flags,
1784 flags);
1785 }
1786
1787 iommu_mr->iommu_notify_flags = flags;
1788 }
1789
1790 void memory_region_register_iommu_notifier(MemoryRegion *mr,
1791 IOMMUNotifier *n)
1792 {
1793 IOMMUMemoryRegion *iommu_mr;
1794
1795 if (mr->alias) {
1796 memory_region_register_iommu_notifier(mr->alias, n);
1797 return;
1798 }
1799
1800 /* We need to register for at least one bitfield */
1801 iommu_mr = IOMMU_MEMORY_REGION(mr);
1802 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1803 assert(n->start <= n->end);
1804 assert(n->iommu_idx >= 0 &&
1805 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1806
1807 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1808 memory_region_update_iommu_notify_flags(iommu_mr);
1809 }
1810
1811 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
1812 {
1813 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1814
1815 if (imrc->get_min_page_size) {
1816 return imrc->get_min_page_size(iommu_mr);
1817 }
1818 return TARGET_PAGE_SIZE;
1819 }
1820
1821 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
1822 {
1823 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1824 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1825 hwaddr addr, granularity;
1826 IOMMUTLBEntry iotlb;
1827
1828 /* If the IOMMU has its own replay callback, override */
1829 if (imrc->replay) {
1830 imrc->replay(iommu_mr, n);
1831 return;
1832 }
1833
1834 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
1835
1836 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1837 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
1838 if (iotlb.perm != IOMMU_NONE) {
1839 n->notify(n, &iotlb);
1840 }
1841
1842 /* if (2^64 - MR size) < granularity, it's possible to get an
1843 * infinite loop here. This should catch such a wraparound */
1844 if ((addr + granularity) < addr) {
1845 break;
1846 }
1847 }
1848 }
1849
1850 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
1851 {
1852 IOMMUNotifier *notifier;
1853
1854 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1855 memory_region_iommu_replay(iommu_mr, notifier);
1856 }
1857 }
1858
1859 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1860 IOMMUNotifier *n)
1861 {
1862 IOMMUMemoryRegion *iommu_mr;
1863
1864 if (mr->alias) {
1865 memory_region_unregister_iommu_notifier(mr->alias, n);
1866 return;
1867 }
1868 QLIST_REMOVE(n, node);
1869 iommu_mr = IOMMU_MEMORY_REGION(mr);
1870 memory_region_update_iommu_notify_flags(iommu_mr);
1871 }
1872
1873 void memory_region_notify_one(IOMMUNotifier *notifier,
1874 IOMMUTLBEntry *entry)
1875 {
1876 IOMMUNotifierFlag request_flags;
1877
1878 /*
1879 * Skip the notification if the notification does not overlap
1880 * with registered range.
1881 */
1882 if (notifier->start > entry->iova + entry->addr_mask ||
1883 notifier->end < entry->iova) {
1884 return;
1885 }
1886
1887 if (entry->perm & IOMMU_RW) {
1888 request_flags = IOMMU_NOTIFIER_MAP;
1889 } else {
1890 request_flags = IOMMU_NOTIFIER_UNMAP;
1891 }
1892
1893 if (notifier->notifier_flags & request_flags) {
1894 notifier->notify(notifier, entry);
1895 }
1896 }
1897
1898 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1899 int iommu_idx,
1900 IOMMUTLBEntry entry)
1901 {
1902 IOMMUNotifier *iommu_notifier;
1903
1904 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
1905
1906 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1907 if (iommu_notifier->iommu_idx == iommu_idx) {
1908 memory_region_notify_one(iommu_notifier, &entry);
1909 }
1910 }
1911 }
1912
1913 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1914 enum IOMMUMemoryRegionAttr attr,
1915 void *data)
1916 {
1917 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1918
1919 if (!imrc->get_attr) {
1920 return -EINVAL;
1921 }
1922
1923 return imrc->get_attr(iommu_mr, attr, data);
1924 }
1925
1926 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1927 MemTxAttrs attrs)
1928 {
1929 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1930
1931 if (!imrc->attrs_to_index) {
1932 return 0;
1933 }
1934
1935 return imrc->attrs_to_index(iommu_mr, attrs);
1936 }
1937
1938 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
1939 {
1940 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1941
1942 if (!imrc->num_indexes) {
1943 return 1;
1944 }
1945
1946 return imrc->num_indexes(iommu_mr);
1947 }
1948
1949 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1950 {
1951 uint8_t mask = 1 << client;
1952 uint8_t old_logging;
1953
1954 assert(client == DIRTY_MEMORY_VGA);
1955 old_logging = mr->vga_logging_count;
1956 mr->vga_logging_count += log ? 1 : -1;
1957 if (!!old_logging == !!mr->vga_logging_count) {
1958 return;
1959 }
1960
1961 memory_region_transaction_begin();
1962 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
1963 memory_region_update_pending |= mr->enabled;
1964 memory_region_transaction_commit();
1965 }
1966
1967 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1968 hwaddr size, unsigned client)
1969 {
1970 assert(mr->ram_block);
1971 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1972 size, client);
1973 }
1974
1975 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1976 hwaddr size)
1977 {
1978 assert(mr->ram_block);
1979 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1980 size,
1981 memory_region_get_dirty_log_mask(mr));
1982 }
1983
1984 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1985 {
1986 MemoryListener *listener;
1987 AddressSpace *as;
1988 FlatView *view;
1989 FlatRange *fr;
1990
1991 /* If the same address space has multiple log_sync listeners, we
1992 * visit that address space's FlatView multiple times. But because
1993 * log_sync listeners are rare, it's still cheaper than walking each
1994 * address space once.
1995 */
1996 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1997 if (!listener->log_sync) {
1998 continue;
1999 }
2000 as = listener->address_space;
2001 view = address_space_get_flatview(as);
2002 FOR_EACH_FLAT_RANGE(fr, view) {
2003 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2004 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2005 listener->log_sync(listener, &mrs);
2006 }
2007 }
2008 flatview_unref(view);
2009 }
2010 }
2011
2012 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2013 hwaddr addr,
2014 hwaddr size,
2015 unsigned client)
2016 {
2017 assert(mr->ram_block);
2018 memory_region_sync_dirty_bitmap(mr);
2019 return cpu_physical_memory_snapshot_and_clear_dirty(
2020 memory_region_get_ram_addr(mr) + addr, size, client);
2021 }
2022
2023 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2024 hwaddr addr, hwaddr size)
2025 {
2026 assert(mr->ram_block);
2027 return cpu_physical_memory_snapshot_get_dirty(snap,
2028 memory_region_get_ram_addr(mr) + addr, size);
2029 }
2030
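/*
 * Illustrative sketch of the snapshot API: take one consistent
 * snapshot, test sub-ranges against it, then free it.  "mr",
 * "stride" and "height" are placeholders for a frame-buffer layout.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(mr, 0,
 *                                                   height * stride,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(mr, snap,
 *                                              y * stride, stride)) {
 *             ... redraw scanline y ...
 *         }
 *     }
 *     g_free(snap);
 */
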
2031 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2032 {
2033 if (mr->readonly != readonly) {
2034 memory_region_transaction_begin();
2035 mr->readonly = readonly;
2036 memory_region_update_pending |= mr->enabled;
2037 memory_region_transaction_commit();
2038 }
2039 }
2040
2041 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2042 {
2043 if (mr->romd_mode != romd_mode) {
2044 memory_region_transaction_begin();
2045 mr->romd_mode = romd_mode;
2046 memory_region_update_pending |= mr->enabled;
2047 memory_region_transaction_commit();
2048 }
2049 }
2050
2051 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2052 hwaddr size, unsigned client)
2053 {
2054 assert(mr->ram_block);
2055 cpu_physical_memory_test_and_clear_dirty(
2056 memory_region_get_ram_addr(mr) + addr, size, client);
2057 }
2058
2059 int memory_region_get_fd(MemoryRegion *mr)
2060 {
2061 int fd;
2062
2063 rcu_read_lock();
2064 while (mr->alias) {
2065 mr = mr->alias;
2066 }
2067 fd = mr->ram_block->fd;
2068 rcu_read_unlock();
2069
2070 return fd;
2071 }
2072
2073 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2074 {
2075 void *ptr;
2076 uint64_t offset = 0;
2077
2078 rcu_read_lock();
2079 while (mr->alias) {
2080 offset += mr->alias_offset;
2081 mr = mr->alias;
2082 }
2083 assert(mr->ram_block);
2084 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2085 rcu_read_unlock();
2086
2087 return ptr;
2088 }
2089
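/*
 * Illustrative sketch: direct host access to a RAM region.  The
 * pointer remains usable while the caller holds a reference to the
 * region; writes must still be flagged dirty by hand.  "off", "buf"
 * and "len" are placeholders.
 *
 *     uint8_t *p = memory_region_get_ram_ptr(mr);
 *
 *     memcpy(p + off, buf, len);
 *     memory_region_set_dirty(mr, off, len);
 */
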
2090 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2091 {
2092 RAMBlock *block;
2093
2094 block = qemu_ram_block_from_host(ptr, false, offset);
2095 if (!block) {
2096 return NULL;
2097 }
2098
2099 return block->mr;
2100 }
2101
2102 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2103 {
2104 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2105 }
2106
2107 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2108 {
2109 assert(mr->ram_block);
2110
2111 qemu_ram_resize(mr->ram_block, newsize, errp);
2112 }
2113
2114 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
2115 {
2116 FlatView *view;
2117 FlatRange *fr;
2118 CoalescedMemoryRange *cmr;
2119 AddrRange tmp;
2120 MemoryRegionSection section;
2121
2122 view = address_space_get_flatview(as);
2123 FOR_EACH_FLAT_RANGE(fr, view) {
2124 if (fr->mr == mr) {
2125 section = (MemoryRegionSection) {
2126 .fv = view,
2127 .offset_within_address_space = int128_get64(fr->addr.start),
2128 .size = fr->addr.size,
2129 };
2130
2131 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
2132 int128_get64(fr->addr.start),
2133 int128_get64(fr->addr.size));
2134 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2135 tmp = addrrange_shift(cmr->addr,
2136 int128_sub(fr->addr.start,
2137 int128_make64(fr->offset_in_region)));
2138 if (!addrrange_intersects(tmp, fr->addr)) {
2139 continue;
2140 }
2141 tmp = addrrange_intersection(tmp, fr->addr);
2142 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
2143 int128_get64(tmp.start),
2144 int128_get64(tmp.size));
2145 }
2146 }
2147 }
2148 flatview_unref(view);
2149 }
2150
2151 static void memory_region_update_coalesced_range(MemoryRegion *mr)
2152 {
2153 AddressSpace *as;
2154
2155 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2156 memory_region_update_coalesced_range_as(mr, as);
2157 }
2158 }
2159
2160 void memory_region_set_coalescing(MemoryRegion *mr)
2161 {
2162 memory_region_clear_coalescing(mr);
2163 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2164 }
2165
2166 void memory_region_add_coalescing(MemoryRegion *mr,
2167 hwaddr offset,
2168 uint64_t size)
2169 {
2170 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2171
2172 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2173 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2174 memory_region_update_coalesced_range(mr);
2175 memory_region_set_flush_coalesced(mr);
2176 }
2177
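/*
 * Illustrative sketch ("s->mmio" is a placeholder): a device can
 * coalesce its whole MMIO window, or only a sub-range that carries
 * no doorbell-style registers.
 *
 *     memory_region_set_coalescing(&s->mmio);
 *
 * or:
 *
 *     memory_region_add_coalescing(&s->mmio, 0x0, 0x20);
 */
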
2178 void memory_region_clear_coalescing(MemoryRegion *mr)
2179 {
2180 CoalescedMemoryRange *cmr;
2181 bool updated = false;
2182
2183 qemu_flush_coalesced_mmio_buffer();
2184 mr->flush_coalesced_mmio = false;
2185
2186 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2187 cmr = QTAILQ_FIRST(&mr->coalesced);
2188 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2189 g_free(cmr);
2190 updated = true;
2191 }
2192
2193 if (updated) {
2194 memory_region_update_coalesced_range(mr);
2195 }
2196 }
2197
2198 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2199 {
2200 mr->flush_coalesced_mmio = true;
2201 }
2202
2203 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2204 {
2205 qemu_flush_coalesced_mmio_buffer();
2206 if (QTAILQ_EMPTY(&mr->coalesced)) {
2207 mr->flush_coalesced_mmio = false;
2208 }
2209 }
2210
2211 void memory_region_clear_global_locking(MemoryRegion *mr)
2212 {
2213 mr->global_locking = false;
2214 }
2215
2216 static bool userspace_eventfd_warning;
2217
2218 void memory_region_add_eventfd(MemoryRegion *mr,
2219 hwaddr addr,
2220 unsigned size,
2221 bool match_data,
2222 uint64_t data,
2223 EventNotifier *e)
2224 {
2225 MemoryRegionIoeventfd mrfd = {
2226 .addr.start = int128_make64(addr),
2227 .addr.size = int128_make64(size),
2228 .match_data = match_data,
2229 .data = data,
2230 .e = e,
2231 };
2232 unsigned i;
2233
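/* Warn (once) when KVM cannot bind the eventfd to MMIO: the
 * notifier still works, but every access bounces out to userspace.
 */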
2234 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2235 userspace_eventfd_warning))) {
2236 userspace_eventfd_warning = true;
2237 error_report("Using eventfd without MMIO binding in KVM. "
2238 "Suboptimal performance expected");
2239 }
2240
2241 if (size) {
2242 adjust_endianness(mr, &mrfd.data, size);
2243 }
2244 memory_region_transaction_begin();
2245 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2246 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2247 break;
2248 }
2249 }
2250 ++mr->ioeventfd_nb;
2251 mr->ioeventfds = g_realloc(mr->ioeventfds,
2252 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2253 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2254 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2255 mr->ioeventfds[i] = mrfd;
2256 ioeventfd_update_pending |= mr->enabled;
2257 memory_region_transaction_commit();
2258 }
2259
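/*
 * Illustrative sketch (placeholder names throughout): wiring a
 * virtio-style doorbell so that a 2-byte write of vq_index at
 * NOTIFY_OFFSET kicks an EventNotifier instead of trapping into
 * the device model.
 *
 *     EventNotifier *n = &s->notifier;
 *
 *     event_notifier_init(n, 0);
 *     memory_region_add_eventfd(&s->mmio, NOTIFY_OFFSET, 2,
 *                               true, vq_index, n);
 *
 * Teardown must pass the exact same (addr, size, match_data, data,
 * e) tuple to memory_region_del_eventfd().
 */
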
2260 void memory_region_del_eventfd(MemoryRegion *mr,
2261 hwaddr addr,
2262 unsigned size,
2263 bool match_data,
2264 uint64_t data,
2265 EventNotifier *e)
2266 {
2267 MemoryRegionIoeventfd mrfd = {
2268 .addr.start = int128_make64(addr),
2269 .addr.size = int128_make64(size),
2270 .match_data = match_data,
2271 .data = data,
2272 .e = e,
2273 };
2274 unsigned i;
2275
2276 if (size) {
2277 adjust_endianness(mr, &mrfd.data, size);
2278 }
2279 memory_region_transaction_begin();
2280 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2281 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2282 break;
2283 }
2284 }
2285 assert(i != mr->ioeventfd_nb);
2286 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2287 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2288 --mr->ioeventfd_nb;
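/* The "+ 1" below keeps the allocation at least one byte long, so
 * g_realloc() never hands back NULL when the array becomes empty.
 */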
2289 mr->ioeventfds = g_realloc(mr->ioeventfds,
2290 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2291 ioeventfd_update_pending |= mr->enabled;
2292 memory_region_transaction_commit();
2293 }
2294
2295 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2296 {
2297 MemoryRegion *mr = subregion->container;
2298 MemoryRegion *other;
2299
2300 memory_region_transaction_begin();
2301
2302 memory_region_ref(subregion);
2303 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2304 if (subregion->priority >= other->priority) {
2305 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2306 goto done;
2307 }
2308 }
2309 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2310 done:
2311 memory_region_update_pending |= mr->enabled && subregion->enabled;
2312 memory_region_transaction_commit();
2313 }
2314
2315 static void memory_region_add_subregion_common(MemoryRegion *mr,
2316 hwaddr offset,
2317 MemoryRegion *subregion)
2318 {
2319 assert(!subregion->container);
2320 subregion->container = mr;
2321 subregion->addr = offset;
2322 memory_region_update_container_subregions(subregion);
2323 }
2324
2325 void memory_region_add_subregion(MemoryRegion *mr,
2326 hwaddr offset,
2327 MemoryRegion *subregion)
2328 {
2329 subregion->priority = 0;
2330 memory_region_add_subregion_common(mr, offset, subregion);
2331 }
2332
2333 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2334 hwaddr offset,
2335 MemoryRegion *subregion,
2336 int priority)
2337 {
2338 subregion->priority = priority;
2339 memory_region_add_subregion_common(mr, offset, subregion);
2340 }
2341
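/*
 * Illustrative sketch (placeholder names): overlapping subregions
 * resolve by priority, higher values win, so an MMIO window can
 * shadow part of a lower-priority RAM region.
 *
 *     memory_region_add_subregion(sysmem, 0, &s->ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfee00000,
 *                                         &s->mmio, 1);
 */
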
2342 void memory_region_del_subregion(MemoryRegion *mr,
2343 MemoryRegion *subregion)
2344 {
2345 memory_region_transaction_begin();
2346 assert(subregion->container == mr);
2347 subregion->container = NULL;
2348 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2349 memory_region_unref(subregion);
2350 memory_region_update_pending |= mr->enabled && subregion->enabled;
2351 memory_region_transaction_commit();
2352 }
2353
2354 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2355 {
2356 if (enabled == mr->enabled) {
2357 return;
2358 }
2359 memory_region_transaction_begin();
2360 mr->enabled = enabled;
2361 memory_region_update_pending = true;
2362 memory_region_transaction_commit();
2363 }
2364
2365 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2366 {
2367 Int128 s = int128_make64(size);
2368
2369 if (size == UINT64_MAX) {
2370 s = int128_2_64();
2371 }
2372 if (int128_eq(s, mr->size)) {
2373 return;
2374 }
2375 memory_region_transaction_begin();
2376 mr->size = s;
2377 memory_region_update_pending = true;
2378 memory_region_transaction_commit();
2379 }
2380
2381 static void memory_region_readd_subregion(MemoryRegion *mr)
2382 {
2383 MemoryRegion *container = mr->container;
2384
2385 if (container) {
2386 memory_region_transaction_begin();
2387 memory_region_ref(mr);
2388 memory_region_del_subregion(container, mr);
2389 mr->container = container;
2390 memory_region_update_container_subregions(mr);
2391 memory_region_unref(mr);
2392 memory_region_transaction_commit();
2393 }
2394 }
2395
2396 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2397 {
2398 if (addr != mr->addr) {
2399 mr->addr = addr;
2400 memory_region_readd_subregion(mr);
2401 }
2402 }
2403
2404 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2405 {
2406 assert(mr->alias);
2407
2408 if (offset == mr->alias_offset) {
2409 return;
2410 }
2411
2412 memory_region_transaction_begin();
2413 mr->alias_offset = offset;
2414 memory_region_update_pending |= mr->enabled;
2415 memory_region_transaction_commit();
2416 }
2417
2418 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2419 {
2420 return mr->align;
2421 }
2422
2423 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2424 {
2425 const AddrRange *addr = addr_;
2426 const FlatRange *fr = fr_;
2427
2428 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2429 return -1;
2430 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2431 return 1;
2432 }
2433 return 0;
2434 }
2435
2436 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2437 {
2438 return bsearch(&addr, view->ranges, view->nr,
2439 sizeof(FlatRange), cmp_flatrange_addr);
2440 }
2441
2442 bool memory_region_is_mapped(MemoryRegion *mr)
2443 {
2444 return mr->container ? true : false;
2445 }
2446
2447 /* Same as memory_region_find, but it does not add a reference to the
2448 * returned region. It must be called from an RCU critical section.
2449 */
2450 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2451 hwaddr addr, uint64_t size)
2452 {
2453 MemoryRegionSection ret = { .mr = NULL };
2454 MemoryRegion *root;
2455 AddressSpace *as;
2456 AddrRange range;
2457 FlatView *view;
2458 FlatRange *fr;
2459
2460 addr += mr->addr;
2461 for (root = mr; root->container; ) {
2462 root = root->container;
2463 addr += root->addr;
2464 }
2465
2466 as = memory_region_to_address_space(root);
2467 if (!as) {
2468 return ret;
2469 }
2470 range = addrrange_make(int128_make64(addr), int128_make64(size));
2471
2472 view = address_space_to_flatview(as);
2473 fr = flatview_lookup(view, range);
2474 if (!fr) {
2475 return ret;
2476 }
2477
2478 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2479 --fr;
2480 }
2481
2482 ret.mr = fr->mr;
2483 ret.fv = view;
2484 range = addrrange_intersection(range, fr->addr);
2485 ret.offset_within_region = fr->offset_in_region;
2486 ret.offset_within_region += int128_get64(int128_sub(range.start,
2487 fr->addr.start));
2488 ret.size = range.size;
2489 ret.offset_within_address_space = int128_get64(range.start);
2490 ret.readonly = fr->readonly;
2491 return ret;
2492 }
2493
2494 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2495 hwaddr addr, uint64_t size)
2496 {
2497 MemoryRegionSection ret;
2498 rcu_read_lock();
2499 ret = memory_region_find_rcu(mr, addr, size);
2500 if (ret.mr) {
2501 memory_region_ref(ret.mr);
2502 }
2503 rcu_read_unlock();
2504 return ret;
2505 }
2506
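/*
 * Illustrative sketch: memory_region_find() takes a reference on
 * the returned region, which the caller must drop.  "sysmem" and
 * "addr" are placeholders.
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */
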
2507 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2508 {
2509 MemoryRegion *mr;
2510
2511 rcu_read_lock();
2512 mr = memory_region_find_rcu(container, addr, 1).mr;
2513 rcu_read_unlock();
2514 return mr && mr != container;
2515 }
2516
2517 void memory_global_dirty_log_sync(void)
2518 {
2519 memory_region_sync_dirty_bitmap(NULL);
2520 }
2521
2522 static VMChangeStateEntry *vmstate_change;
2523
2524 void memory_global_dirty_log_start(void)
2525 {
2526 if (vmstate_change) {
2527 qemu_del_vm_change_state_handler(vmstate_change);
2528 vmstate_change = NULL;
2529 }
2530
2531 global_dirty_log = true;
2532
2533 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2534
2535 /* Refresh DIRTY_LOG_MIGRATION bit. */
2536 memory_region_transaction_begin();
2537 memory_region_update_pending = true;
2538 memory_region_transaction_commit();
2539 }
2540
2541 static void memory_global_dirty_log_do_stop(void)
2542 {
2543 global_dirty_log = false;
2544
2545 /* Refresh DIRTY_LOG_MIGRATION bit. */
2546 memory_region_transaction_begin();
2547 memory_region_update_pending = true;
2548 memory_region_transaction_commit();
2549
2550 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2551 }
2552
2553 static void memory_vm_change_state_handler(void *opaque, int running,
2554 RunState state)
2555 {
2556 if (running) {
2557 memory_global_dirty_log_do_stop();
2558
2559 if (vmstate_change) {
2560 qemu_del_vm_change_state_handler(vmstate_change);
2561 vmstate_change = NULL;
2562 }
2563 }
2564 }
2565
2566 void memory_global_dirty_log_stop(void)
2567 {
2568 if (!runstate_is_running()) {
2569 if (vmstate_change) {
2570 return;
2571 }
2572 vmstate_change = qemu_add_vm_change_state_handler(
2573 memory_vm_change_state_handler, NULL);
2574 return;
2575 }
2576
2577 memory_global_dirty_log_do_stop();
2578 }
2579
2580 static void listener_add_address_space(MemoryListener *listener,
2581 AddressSpace *as)
2582 {
2583 FlatView *view;
2584 FlatRange *fr;
2585
2586 if (listener->begin) {
2587 listener->begin(listener);
2588 }
2589 if (global_dirty_log) {
2590 if (listener->log_global_start) {
2591 listener->log_global_start(listener);
2592 }
2593 }
2594
2595 view = address_space_get_flatview(as);
2596 FOR_EACH_FLAT_RANGE(fr, view) {
2597 MemoryRegionSection section = section_from_flat_range(fr, view);
2598
2599 if (listener->region_add) {
2600 listener->region_add(listener, &section);
2601 }
2602 if (fr->dirty_log_mask && listener->log_start) {
2603 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2604 }
2605 }
2606 if (listener->commit) {
2607 listener->commit(listener);
2608 }
2609 flatview_unref(view);
2610 }
2611
2612 static void listener_del_address_space(MemoryListener *listener,
2613 AddressSpace *as)
2614 {
2615 FlatView *view;
2616 FlatRange *fr;
2617
2618 if (listener->begin) {
2619 listener->begin(listener);
2620 }
2621 view = address_space_get_flatview(as);
2622 FOR_EACH_FLAT_RANGE(fr, view) {
2623 MemoryRegionSection section = section_from_flat_range(fr, view);
2624
2625 if (fr->dirty_log_mask && listener->log_stop) {
2626 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2627 }
2628 if (listener->region_del) {
2629 listener->region_del(listener, &section);
2630 }
2631 }
2632 if (listener->commit) {
2633 listener->commit(listener);
2634 }
2635 flatview_unref(view);
2636 }
2637
2638 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2639 {
2640 MemoryListener *other = NULL;
2641
2642 listener->address_space = as;
2643 if (QTAILQ_EMPTY(&memory_listeners)
2644 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2645 memory_listeners)->priority) {
2646 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2647 } else {
2648 QTAILQ_FOREACH(other, &memory_listeners, link) {
2649 if (listener->priority < other->priority) {
2650 break;
2651 }
2652 }
2653 QTAILQ_INSERT_BEFORE(other, listener, link);
2654 }
2655
2656 if (QTAILQ_EMPTY(&as->listeners)
2657 || listener->priority >= QTAILQ_LAST(&as->listeners,
2658 memory_listeners)->priority) {
2659 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2660 } else {
2661 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2662 if (listener->priority < other->priority) {
2663 break;
2664 }
2665 }
2666 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2667 }
2668
2669 listener_add_address_space(listener, as);
2670 }
2671
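/*
 * Illustrative sketch: a minimal listener that only cares about
 * region_add events on the global memory address space.  The names
 * are placeholders; unset callbacks are simply skipped.
 *
 *     static void my_region_add(MemoryListener *l,
 *                               MemoryRegionSection *section)
 *     {
 *         ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Registration immediately replays the current topology through
 * region_add, as listener_add_address_space() above shows.
 */
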
2672 void memory_listener_unregister(MemoryListener *listener)
2673 {
2674 if (!listener->address_space) {
2675 return;
2676 }
2677
2678 listener_del_address_space(listener, listener->address_space);
2679 QTAILQ_REMOVE(&memory_listeners, listener, link);
2680 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
2681 listener->address_space = NULL;
2682 }
2683
2684 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
2685 {
2686 memory_region_ref(root);
2687 as->root = root;
2688 as->current_map = NULL;
2689 as->ioeventfd_nb = 0;
2690 as->ioeventfds = NULL;
2691 QTAILQ_INIT(&as->listeners);
2692 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
2693 as->name = g_strdup(name ? name : "anonymous");
2694 address_space_update_topology(as);
2695 address_space_update_ioeventfds(as);
2696 }
2697
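/*
 * Illustrative sketch (placeholder names): a device creating a
 * private address space for its DMA view of the system.
 *
 *     address_space_init(&s->dma_as, &s->dma_mr, "mydev-dma");
 */
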
2698 static void do_address_space_destroy(AddressSpace *as)
2699 {
2700 assert(QTAILQ_EMPTY(&as->listeners));
2701
2702 flatview_unref(as->current_map);
2703 g_free(as->name);
2704 g_free(as->ioeventfds);
2705 memory_region_unref(as->root);
2706 }
2707
2708 void address_space_destroy(AddressSpace *as)
2709 {
2710 MemoryRegion *root = as->root;
2711
2712 /* Flush out anything from MemoryListeners listening in on this address space */
2713 memory_region_transaction_begin();
2714 as->root = NULL;
2715 memory_region_transaction_commit();
2716 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2717
2718 /* At this point, as->dispatch and as->current_map are dummy
2719 * entries that the guest should never use. Wait for the old
2720 * values to expire before freeing the data.
2721 */
2722 as->root = root;
2723 call_rcu(as, do_address_space_destroy, rcu);
2724 }
2725
2726 static const char *memory_region_type(MemoryRegion *mr)
2727 {
2728 if (memory_region_is_ram_device(mr)) {
2729 return "ramd";
2730 } else if (memory_region_is_romd(mr)) {
2731 return "romd";
2732 } else if (memory_region_is_rom(mr)) {
2733 return "rom";
2734 } else if (memory_region_is_ram(mr)) {
2735 return "ram";
2736 } else {
2737 return "i/o";
2738 }
2739 }
2740
2741 typedef struct MemoryRegionList MemoryRegionList;
2742
2743 struct MemoryRegionList {
2744 const MemoryRegion *mr;
2745 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
2746 };
2747
2748 typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
2749
2750 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2751 int128_sub((size), int128_one())) : 0)
2752 #define MTREE_INDENT " "
2753
2754 static void mtree_expand_owner(fprintf_function mon_printf, void *f,
2755 const char *label, Object *obj)
2756 {
2757 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2758
2759 mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
2760 if (dev && dev->id) {
2761 mon_printf(f, " id=%s", dev->id);
2762 } else {
2763 gchar *canonical_path = object_get_canonical_path(obj);
2764 if (canonical_path) {
2765 mon_printf(f, " path=%s", canonical_path);
2766 g_free(canonical_path);
2767 } else {
2768 mon_printf(f, " type=%s", object_get_typename(obj));
2769 }
2770 }
2771 mon_printf(f, "}");
2772 }
2773
2774 static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
2775 const MemoryRegion *mr)
2776 {
2777 Object *owner = mr->owner;
2778 Object *parent = memory_region_owner((MemoryRegion *)mr);
2779
2780 if (!owner && !parent) {
2781 mon_printf(f, " orphan");
2782 return;
2783 }
2784 if (owner) {
2785 mtree_expand_owner(mon_printf, f, "owner", owner);
2786 }
2787 if (parent && parent != owner) {
2788 mtree_expand_owner(mon_printf, f, "parent", parent);
2789 }
2790 }
2791
2792 static void mtree_print_mr(fprintf_function mon_printf, void *f,
2793 const MemoryRegion *mr, unsigned int level,
2794 hwaddr base,
2795 MemoryRegionListHead *alias_print_queue,
2796 bool owner)
2797 {
2798 MemoryRegionList *new_ml, *ml, *next_ml;
2799 MemoryRegionListHead submr_print_queue;
2800 const MemoryRegion *submr;
2801 unsigned int i;
2802 hwaddr cur_start, cur_end;
2803
2804 if (!mr) {
2805 return;
2806 }
2807
2808 for (i = 0; i < level; i++) {
2809 mon_printf(f, MTREE_INDENT);
2810 }
2811
2812 cur_start = base + mr->addr;
2813 cur_end = cur_start + MR_SIZE(mr->size);
2814
2815 /*
2816 * Try to detect overflow of the memory region's address range.
2817 * This should never happen under normal circumstances; if it
2818 * does, print a marker to warn whoever is inspecting the tree.
2819 */
2820 if (cur_start < base || cur_end < cur_start) {
2821 mon_printf(f, "[DETECTED OVERFLOW!] ");
2822 }
2823
2824 if (mr->alias) {
2825 MemoryRegionList *ml;
2826 bool found = false;
2827
2828 /* check if the alias is already in the queue */
2829 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
2830 if (ml->mr == mr->alias) {
2831 found = true;
2832 }
2833 }
2834
2835 if (!found) {
2836 ml = g_new(MemoryRegionList, 1);
2837 ml->mr = mr->alias;
2838 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
2839 }
2840 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
2841 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
2842 "-" TARGET_FMT_plx "%s",
2843 cur_start, cur_end,
2844 mr->priority,
2845 memory_region_type((MemoryRegion *)mr),
2846 memory_region_name(mr),
2847 memory_region_name(mr->alias),
2848 mr->alias_offset,
2849 mr->alias_offset + MR_SIZE(mr->size),
2850 mr->enabled ? "" : " [disabled]");
2851 if (owner) {
2852 mtree_print_mr_owner(mon_printf, f, mr);
2853 }
2854 } else {
2855 mon_printf(f,
2856 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s",
2857 cur_start, cur_end,
2858 mr->priority,
2859 memory_region_type((MemoryRegion *)mr),
2860 memory_region_name(mr),
2861 mr->enabled ? "" : " [disabled]");
2862 if (owner) {
2863 mtree_print_mr_owner(mon_printf, f, mr);
2864 }
2865 }
2866 mon_printf(f, "\n");
2867
2868 QTAILQ_INIT(&submr_print_queue);
2869
2870 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
2871 new_ml = g_new(MemoryRegionList, 1);
2872 new_ml->mr = submr;
2873 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2874 if (new_ml->mr->addr < ml->mr->addr ||
2875 (new_ml->mr->addr == ml->mr->addr &&
2876 new_ml->mr->priority > ml->mr->priority)) {
2877 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
2878 new_ml = NULL;
2879 break;
2880 }
2881 }
2882 if (new_ml) {
2883 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
2884 }
2885 }
2886
2887 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2888 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
2889 alias_print_queue, owner);
2890 }
2891
2892 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
2893 g_free(ml);
2894 }
2895 }
2896
2897 struct FlatViewInfo {
2898 fprintf_function mon_printf;
2899 void *f;
2900 int counter;
2901 bool dispatch_tree;
2902 bool owner;
2903 };
2904
2905 static void mtree_print_flatview(gpointer key, gpointer value,
2906 gpointer user_data)
2907 {
2908 FlatView *view = key;
2909 GArray *fv_address_spaces = value;
2910 struct FlatViewInfo *fvi = user_data;
2911 fprintf_function p = fvi->mon_printf;
2912 void *f = fvi->f;
2913 FlatRange *range = &view->ranges[0];
2914 MemoryRegion *mr;
2915 int n = view->nr;
2916 int i;
2917 AddressSpace *as;
2918
2919 p(f, "FlatView #%d\n", fvi->counter);
2920 ++fvi->counter;
2921
2922 for (i = 0; i < fv_address_spaces->len; ++i) {
2923 as = g_array_index(fv_address_spaces, AddressSpace*, i);
2924 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
2925 if (as->root->alias) {
2926 p(f, ", alias %s", memory_region_name(as->root->alias));
2927 }
2928 p(f, "\n");
2929 }
2930
2931 p(f, " Root memory region: %s\n",
2932 view->root ? memory_region_name(view->root) : "(none)");
2933
2934 if (n <= 0) {
2935 p(f, MTREE_INDENT "No rendered FlatView\n\n");
2936 return;
2937 }
2938
2939 while (n--) {
2940 mr = range->mr;
2941 if (range->offset_in_region) {
2942 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2943 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx,
2944 int128_get64(range->addr.start),
2945 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2946 mr->priority,
2947 range->readonly ? "rom" : memory_region_type(mr),
2948 memory_region_name(mr),
2949 range->offset_in_region);
2950 } else {
2951 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2952 TARGET_FMT_plx " (prio %d, %s): %s",
2953 int128_get64(range->addr.start),
2954 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2955 mr->priority,
2956 range->readonly ? "rom" : memory_region_type(mr),
2957 memory_region_name(mr));
2958 }
2959 if (fvi->owner) {
2960 mtree_print_mr_owner(p, f, mr);
2961 }
2962 p(f, "\n");
2963 range++;
2964 }
2965
2966 #if !defined(CONFIG_USER_ONLY)
2967 if (fvi->dispatch_tree && view->root) {
2968 mtree_print_dispatch(p, f, view->dispatch, view->root);
2969 }
2970 #endif
2971
2972 p(f, "\n");
2973 }
2974
2975 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
2976 gpointer user_data)
2977 {
2978 FlatView *view = key;
2979 GArray *fv_address_spaces = value;
2980
2981 g_array_unref(fv_address_spaces);
2982 flatview_unref(view);
2983
2984 return true;
2985 }
2986
2987 void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
2988 bool dispatch_tree, bool owner)
2989 {
2990 MemoryRegionListHead ml_head;
2991 MemoryRegionList *ml, *ml2;
2992 AddressSpace *as;
2993
2994 if (flatview) {
2995 FlatView *view;
2996 struct FlatViewInfo fvi = {
2997 .mon_printf = mon_printf,
2998 .f = f,
2999 .counter = 0,
3000 .dispatch_tree = dispatch_tree,
3001 .owner = owner,
3002 };
3003 GArray *fv_address_spaces;
3004 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3005
3006 /* Gather all FlatViews in one table */
3007 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3008 view = address_space_get_flatview(as);
3009
3010 fv_address_spaces = g_hash_table_lookup(views, view);
3011 if (!fv_address_spaces) {
3012 fv_address_spaces = g_array_new(false, false, sizeof(as));
3013 g_hash_table_insert(views, view, fv_address_spaces);
3014 }
3015
3016 g_array_append_val(fv_address_spaces, as);
3017 }
3018
3019 /* Print */
3020 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3021
3022 /* Free */
3023 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3024 g_hash_table_unref(views);
3025
3026 return;
3027 }
3028
3029 QTAILQ_INIT(&ml_head);
3030
3031 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3032 mon_printf(f, "address-space: %s\n", as->name);
3033 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
3034 mon_printf(f, "\n");
3035 }
3036
3037 /* print aliased regions */
3038 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3039 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3040 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
3041 mon_printf(f, "\n");
3042 }
3043
3044 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3045 g_free(ml);
3046 }
3047 }
3048
3049 void memory_region_init_ram(MemoryRegion *mr,
3050 struct Object *owner,
3051 const char *name,
3052 uint64_t size,
3053 Error **errp)
3054 {
3055 DeviceState *owner_dev;
3056 Error *err = NULL;
3057
3058 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3059 if (err) {
3060 error_propagate(errp, err);
3061 return;
3062 }
3063 /* This will assert if owner is neither NULL nor a DeviceState.
3064 * We only want the owner here for the purposes of defining a
3065 * unique name for migration. TODO: Ideally we should implement
3066 * a naming scheme for Objects which are not DeviceStates, in
3067 * which case we can relax this restriction.
3068 */
3069 owner_dev = DEVICE(owner);
3070 vmstate_register_ram(mr, owner_dev);
3071 }
3072
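/*
 * Illustrative sketch (placeholder names): the common pattern of
 * allocating migratable RAM for a device and mapping it into the
 * system address space.
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            0x10000, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), base, &s->ram);
 */
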
3073 void memory_region_init_rom(MemoryRegion *mr,
3074 struct Object *owner,
3075 const char *name,
3076 uint64_t size,
3077 Error **errp)
3078 {
3079 DeviceState *owner_dev;
3080 Error *err = NULL;
3081
3082 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3083 if (err) {
3084 error_propagate(errp, err);
3085 return;
3086 }
3087 /* This will assert if owner is neither NULL nor a DeviceState.
3088 * We only want the owner here for the purposes of defining a
3089 * unique name for migration. TODO: Ideally we should implement
3090 * a naming scheme for Objects which are not DeviceStates, in
3091 * which case we can relax this restriction.
3092 */
3093 owner_dev = DEVICE(owner);
3094 vmstate_register_ram(mr, owner_dev);
3095 }
3096
3097 void memory_region_init_rom_device(MemoryRegion *mr,
3098 struct Object *owner,
3099 const MemoryRegionOps *ops,
3100 void *opaque,
3101 const char *name,
3102 uint64_t size,
3103 Error **errp)
3104 {
3105 DeviceState *owner_dev;
3106 Error *err = NULL;
3107
3108 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3109 name, size, &err);
3110 if (err) {
3111 error_propagate(errp, err);
3112 return;
3113 }
3114 /* This will assert if owner is neither NULL nor a DeviceState.
3115 * We only want the owner here for the purposes of defining a
3116 * unique name for migration. TODO: Ideally we should implement
3117 * a naming scheme for Objects which are not DeviceStates, in
3118 * which case we can relax this restriction.
3119 */
3120 owner_dev = DEVICE(owner);
3121 vmstate_register_ram(mr, owner_dev);
3122 }
3123
3124 static const TypeInfo memory_region_info = {
3125 .parent = TYPE_OBJECT,
3126 .name = TYPE_MEMORY_REGION,
3127 .instance_size = sizeof(MemoryRegion),
3128 .instance_init = memory_region_initfn,
3129 .instance_finalize = memory_region_finalize,
3130 };
3131
3132 static const TypeInfo iommu_memory_region_info = {
3133 .parent = TYPE_MEMORY_REGION,
3134 .name = TYPE_IOMMU_MEMORY_REGION,
3135 .class_size = sizeof(IOMMUMemoryRegionClass),
3136 .instance_size = sizeof(IOMMUMemoryRegion),
3137 .instance_init = iommu_memory_region_initfn,
3138 .abstract = true,
3139 };
3140
3141 static void memory_register_types(void)
3142 {
3143 type_register_static(&memory_region_info);
3144 type_register_static(&iommu_memory_region_info);
3145 }
3146
3147 type_init(memory_register_types)