/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};

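/* Total order on ioeventfds: by address, then size, then whether a data
 * match is required, then the match value, and finally the file descriptor.
 * Used to keep per-region and per-address-space ioeventfd arrays sorted so
 * they can be compared with a linear merge.
 */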
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (a.addr.start < b.addr.start) {
        return true;
    } else if (a.addr.start > b.addr.start) {
        return false;
    } else if (a.addr.size < b.addr.size) {
        return true;
    } else if (a.addr.size > b.addr.size) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

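/* Two flat ranges can be merged when they are contiguous both in the guest
 * physical address space and in the underlying region, and have identical
 * dirty logging state.
 */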
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static void memory_region_prepare_ram_addr(MemoryRegion *mr);

static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM. Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}

static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}

static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}

static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset - mrp->offset);
        }
        return;
    }
    *data = mr->ops->read(mr->opaque, offset, width);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset - mrp->offset, data);
        }
        return;
    }
    mr->ops->write(mr->opaque, offset, data, width);
}

static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};

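/* The I/O address space maps flat ranges onto the legacy ioport layer: each
 * visible range is wrapped in an IORange and registered, and removed ranges
 * are simply unassigned from the ISA port space.
 */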
static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}

static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}

static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

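/* Rebuild the address space's ioeventfd list from the current flat view:
 * collect every region ioeventfd that falls inside a visible range, translate
 * it to an absolute address, then diff against the previous list so that only
 * the changed entries are registered or deregistered.
 */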
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  fr->addr.start - fr->offset_in_region);
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = qemu_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    qemu_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

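/* The topology update is done in two passes over the same old/new pair of
 * flat views: the first pass (adding == false) only deletes ranges that have
 * disappeared or changed, and the second pass (adding == true) registers new
 * ranges and adjusts dirty logging, so the old map is fully torn down before
 * the new one is installed.
 */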
static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
            }

            ++inew;
        }
    }
}


static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(void)
{
    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* For compatibility, treat a zero max_access_size as "all sizes valid". */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

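/* Read/write thunks: split a guest access into pieces the device's .read or
 * .write callback can handle (bounded by ops->impl.min/max_access_size) and
 * assemble the result. Regions using old_mmio callbacks are dispatched
 * directly by access size.
 */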
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

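/* Lazily register an MMIO region's backend with the old exec.c dispatch layer
 * the first time it becomes visible in a flat view; RAM regions get their
 * ram_addr assigned at init time instead.
 */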
static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}

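/* Typical device-side use of the initializers below (illustrative only; the
 * device state, ops and field names are hypothetical):
 *
 *     static const MemoryRegionOps mydev_mmio_ops = {
 *         .read = mydev_mmio_read,
 *         .write = mydev_mmio_write,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_mmio_ops, s, "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(parent, base_addr, &s->mmio);
 */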
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
    qemu_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

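/* Insert (or remove) an ioeventfd while keeping the region's array sorted by
 * memory_region_ioeventfd_before(), then rebuild the topology so the change
 * propagates to the address spaces.
 */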
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = qemu_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = qemu_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}

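/* Attach a subregion to a container: warn (but proceed) if it collides with a
 * sibling that was not marked as overlapping, keep the sibling list sorted by
 * descending priority, and rebuild the topology.
 */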
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}


void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}