qemu.git / memory.c
memory: add backward compatibility for old mmio registration
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
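/* Worked example: intersecting {0x1000, 0x1000} with {0x1800, 0x1000}
 * yields {0x1800, 0x800}.  Comparing inclusive last addresses
 * (start + size - 1) instead of one-past-the-end values keeps the
 * arithmetic correct even for a range that ends exactly at the top of
 * the 64-bit space, where start + size would wrap to 0.
 */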

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
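/* Example: two consecutive view entries that map the same region back to
 * back - say {0x1000, 0x1000} at region offset 0 and {0x2000, 0x1000} at
 * region offset 0x1000, with identical dirty_log_mask - satisfy can_merge()
 * and collapse into a single {0x1000, 0x2000} entry, so fewer ranges are
 * pushed down to the lower layers.
 */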

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM. Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}
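/* Note (assumption about the pre-MemoryRegion core): the low bits of
 * phys_offset encode the I/O handler index, with IO_MEM_RAM/IO_MEM_ROM
 * meaning plain memory.  For RAM/ROM the offset into the region is folded
 * into phys_offset itself; only true MMIO keeps a separate region_offset
 * for its callbacks.
 */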

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}
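/* Illustrative sketch (identifiers are hypothetical): a legacy port I/O
 * device can leave ops->read/write unset and instead describe its handlers
 * with a zero-terminated MemoryRegionPortio table, e.g.
 *
 *     static const MemoryRegionPortio mydev_portio[] = {
 *         { .offset = 0, .len = 8, .size = 1,
 *           .read = mydev_ioport_read, .write = mydev_ioport_write },
 *         { }   // size == 0 terminates the walk in find_portio()
 *     };
 *
 * find_portio() matches an access by offset, width and direction, and the
 * iorange callbacks below dispatch to the matching entry.
 */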

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset - mrp->offset);
        }
        return;
    }
    *data = mr->ops->read(mr->opaque, offset, width);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset - mrp->offset, data);
        }
        return;
    }
    mr->ops->write(mr->opaque, offset, data, width);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}
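/* Example of the obscuring rule: with a 4 GB RAM subregion at 0 and a
 * higher-priority 64 KB MMIO BAR at 0xe0000000, the BAR is rendered first
 * and claims [0xe0000000, 0xe0010000); RAM is rendered afterwards and only
 * fills the remaining gaps, so the resulting view holds three disjoint
 * FlatRanges: RAM below the BAR, the BAR itself, and RAM above it.
 */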

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }
    as->current_map = new_view;
    flatview_destroy(&old_view);
}
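/* Both views are sorted by start address, so the loop above is a two-pointer
 * merge.  If the old view is {A, B, C} and the new one is {A, B', C} where
 * only B's attributes changed, the walk keeps A, deletes B, adds B' and
 * keeps C - only the range that actually differs is re-registered.
 */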

static void memory_region_update_topology(void)
{
    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a max_access_size of zero as "no restriction", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}
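/* Access splitting example: a device that declares impl.max_access_size = 1
 * sees a guest 4-byte read arrive as four 1-byte ops->read() calls at addr,
 * addr+1, addr+2 and addr+3, assembled little-endian (hence the big-endian
 * FIXME above).  When ops->read is absent, the old_mmio.read[] table added
 * for backward compatibility is used instead, selected by the access size.
 */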

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}
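/* MMIO regions are hooked into the old cpu_register_io_memory() core lazily,
 * the first time they appear in a flattened view (see as_memory_range_add()).
 * The thunk tables above forward the legacy byte/word/long callbacks to
 * memory_region_{read,write}_thunk_n(), which apply the MemoryRegionOps
 * validity and access-size rules.
 */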

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}
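/* Illustrative sketch (hypothetical device, identifiers are not from this
 * tree): a device that still has old-style CPUReadMemoryFunc/
 * CPUWriteMemoryFunc handlers can expose them through the old_mmio
 * compatibility hook instead of converting to ops->read/write right away.
 */
#if 0
static const MemoryRegionOps mydev_ops = {
    .old_mmio = {
        .read  = { mydev_readb, mydev_readw, mydev_readl },
        .write = { mydev_writeb, mydev_writew, mydev_writel },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void mydev_init_mmio(MyDevState *s)
{
    /* 4 KB MMIO window; mapped later with memory_region_add_subregion(). */
    memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
}
#endif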

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->addr,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}


void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
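/* Illustrative sketch (hypothetical board code, identifiers are not from
 * this tree): RAM is mapped at 0 and a higher-priority window is layered on
 * top of it with the _overlap variant.
 */
#if 0
static void board_map_memory(MemoryRegion *sysmem, MemoryRegion *ram,
                             MemoryRegion *isa_bios)
{
    memory_region_add_subregion(sysmem, 0, ram);
    /* priority 1 beats RAM's default 0, so the BIOS window obscures RAM */
    memory_region_add_subregion_overlap(sysmem, 0x000e0000, isa_bios, 1);
    set_system_memory_map(sysmem);
}
#endif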

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}