/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

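/* A coalesced MMIO sub-range of a memory region, kept on the region's
 * ->coalesced list; the stored address range is relative to the region's
 * start.
 */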
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
};

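/* Callbacks by which an address space reacts to changes in its flat view:
 * installing or removing a range in the underlying machinery, and turning
 * dirty logging on or off for a range.
 */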
struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

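/* Two flat ranges can be merged when they are contiguous both in guest
 * physical address space and within the backing region, and share the same
 * region and dirty logging mask.
 */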
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

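/* Glue between the memory core and the cpu_register_physical_memory*()
 * machinery: install, remove, or toggle dirty logging for one flat range
 * of the memory address space.
 */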
static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM. Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

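/* Find the old-style portio entry that covers @offset with a matching access
 * width and direction, or NULL if none does.
 */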
static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

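/* Adapt a MemoryRegion to the ioport layer's IORange interface. Accesses are
 * dispatched through ->ops->old_portio when present (backward compatibility
 * with the old portio registration), and through ->ops->read/write otherwise.
 */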
static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset - mrp->offset);
        }
        return;
    }
    *data = mr->ops->read(mr->opaque, offset, width);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset - mrp->offset, data);
        }
        return;
    }
    mr->ops->write(mr->opaque, offset, data, width);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

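/* The flat view is built by recursing into each region's aliases and
 * subregions; subregions are visited in descending priority order, so higher
 * priority regions claim their addresses first and lower priority regions
 * only fill the gaps that remain.
 */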
/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

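/* Regenerate the flat view for an address space and bring the underlying
 * machinery up to date by comparing the old and new views range by range.
 */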
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }
    as->current_map = new_view;
    flatview_destroy(&old_view);
}

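/* Rebuild the flat views of every address space that currently has a root
 * memory region installed.
 */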
static void memory_region_update_topology(void)
{
    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

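/* Check an access against the constraints advertised in ->ops->valid:
 * alignment and minimum/maximum access size.
 */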
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* A max_access_size of zero means any access size is valid
     * (compatibility with regions that do not fill in ->ops->valid).
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

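/* Split a guest access into pieces the device implementation can handle, as
 * advertised in ->ops->impl, and assemble the result. Together with the
 * fixed-size wrappers below, these thunks adapt MemoryRegionOps callbacks to
 * the CPUReadMemoryFunc/CPUWriteMemoryFunc interface expected by
 * cpu_register_io_memory().
 */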
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

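/* Lazily assign a ram_addr to an I/O region: the backend is registered with
 * cpu_register_io_memory() the first time the region is actually mapped
 * (see as_memory_range_add()), not when it is initialized.
 */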
static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

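/* Typical device-side use of the initializers below (an illustrative sketch
 * only; the "foo" device, its callbacks, and the parent region variable are
 * hypothetical and not part of this file):
 *
 *     static const MemoryRegionOps foo_ops = {
 *         .read  = foo_mmio_read,
 *         .write = foo_mmio_write,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &foo_ops, s, "foo-mmio", 0x1000);
 *     memory_region_add_subregion(parent_region, base_addr, &s->mmio);
 *
 * RAM-backed regions are created with memory_region_init_ram() and mapped
 * the same way.
 */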
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

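/* Dirty logging control: each log client owns one bit of dirty_log_mask.
 * Changing the mask triggers a topology update so that log_start/log_stop
 * reach the address space backends.
 */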
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    return cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

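/* Re-register the region's coalesced MMIO sub-ranges, clipped against
 * wherever the region currently appears in the flat view of the memory
 * address space.
 */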
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

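/* Attach @subregion to @mr at @offset: warn about overlaps unless either side
 * was registered with the _overlap variant, then insert the subregion in
 * descending priority order and update the topology.
 */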
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->addr,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

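/* Install the root memory region of the memory and I/O address spaces; the
 * flat views are (re)generated from these roots.
 */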
void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}