1/*
2 * Physical memory management
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14#include "memory.h"
15#include "exec-memory.h"
16#include "ioport.h"
17#include "bitops.h"
18#include "kvm.h"
19#include <assert.h>
20
21typedef struct AddrRange AddrRange;
22
23struct AddrRange {
24 uint64_t start;
25 uint64_t size;
26};
27
28static AddrRange addrrange_make(uint64_t start, uint64_t size)
29{
30 return (AddrRange) { start, size };
31}
32
33static bool addrrange_equal(AddrRange r1, AddrRange r2)
34{
35 return r1.start == r2.start && r1.size == r2.size;
36}
37
38static uint64_t addrrange_end(AddrRange r)
39{
40 return r.start + r.size;
41}
42
43static AddrRange addrrange_shift(AddrRange range, int64_t delta)
44{
45 range.start += delta;
46 return range;
47}
48
49static bool addrrange_intersects(AddrRange r1, AddrRange r2)
50{
51 return (r1.start >= r2.start && r1.start < r2.start + r2.size)
52 || (r2.start >= r1.start && r2.start < r1.start + r1.size);
53}
54
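/* Intersection of two ranges.  Callers must ensure the ranges actually
 * intersect (see addrrange_intersects() above).  End points are computed
 * as "last byte" rather than "one past the end" so that a range reaching
 * the top of the 64-bit address space does not overflow.
 */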
55static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
56{
57 uint64_t start = MAX(r1.start, r2.start);
58 /* off-by-one arithmetic to prevent overflow */
59 uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
60 return addrrange_make(start, end - start + 1);
61}
62
63struct CoalescedMemoryRange {
64 AddrRange addr;
65 QTAILQ_ENTRY(CoalescedMemoryRange) link;
66};
67
68struct MemoryRegionIoeventfd {
69 AddrRange addr;
70 bool match_data;
71 uint64_t data;
72 int fd;
73};
74
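/* Total order on ioeventfds: compare (start, size, match_data, data, fd)
 * lexicographically.  This is used both to keep ioeventfd arrays sorted
 * and, via memory_region_ioeventfd_equal(), to test for equality.
 */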
75static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
76 MemoryRegionIoeventfd b)
77{
78 if (a.addr.start < b.addr.start) {
79 return true;
80 } else if (a.addr.start > b.addr.start) {
81 return false;
82 } else if (a.addr.size < b.addr.size) {
83 return true;
84 } else if (a.addr.size > b.addr.size) {
85 return false;
86 } else if (a.match_data < b.match_data) {
87 return true;
88 } else if (a.match_data > b.match_data) {
89 return false;
90 } else if (a.match_data) {
91 if (a.data < b.data) {
92 return true;
93 } else if (a.data > b.data) {
94 return false;
95 }
96 }
97 if (a.fd < b.fd) {
98 return true;
99 } else if (a.fd > b.fd) {
100 return false;
101 }
102 return false;
103}
104
105static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
106 MemoryRegionIoeventfd b)
107{
108 return !memory_region_ioeventfd_before(a, b)
109 && !memory_region_ioeventfd_before(b, a);
110}
111
112typedef struct FlatRange FlatRange;
113typedef struct FlatView FlatView;
114
115/* Range of memory in the global map. Addresses are absolute. */
116struct FlatRange {
117 MemoryRegion *mr;
118 target_phys_addr_t offset_in_region;
119 AddrRange addr;
120    uint8_t dirty_log_mask;
121};
122
123/* Flattened global view of current active memory hierarchy. Kept in sorted
124 * order.
125 */
126struct FlatView {
127 FlatRange *ranges;
128 unsigned nr;
129 unsigned nr_allocated;
130};
131
132typedef struct AddressSpace AddressSpace;
133typedef struct AddressSpaceOps AddressSpaceOps;
134
135/* A system address space - I/O, memory, etc. */
136struct AddressSpace {
137 const AddressSpaceOps *ops;
138 MemoryRegion *root;
139 FlatView current_map;
140 int ioeventfd_nb;
141 MemoryRegionIoeventfd *ioeventfds;
142};
143
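/* Callbacks that connect an address space to its backend: the memory
 * address space is backed by exec.c (cpu_register_physical_memory*), the
 * I/O address space by ioport.c, and ioeventfds are handed to kvm.  A
 * callback fires whenever the flattened view or the ioeventfd list changes.
 */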
144struct AddressSpaceOps {
145 void (*range_add)(AddressSpace *as, FlatRange *fr);
146 void (*range_del)(AddressSpace *as, FlatRange *fr);
147 void (*log_start)(AddressSpace *as, FlatRange *fr);
148 void (*log_stop)(AddressSpace *as, FlatRange *fr);
149 void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
150 void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
151};
152
153#define FOR_EACH_FLAT_RANGE(var, view) \
154 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
155
156static bool flatrange_equal(FlatRange *a, FlatRange *b)
157{
158 return a->mr == b->mr
159 && addrrange_equal(a->addr, b->addr)
160 && a->offset_in_region == b->offset_in_region;
161}
162
163static void flatview_init(FlatView *view)
164{
165 view->ranges = NULL;
166 view->nr = 0;
167 view->nr_allocated = 0;
168}
169
170/* Insert a range into a given position. Caller is responsible for maintaining
171 * sorting order.
172 */
173static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
174{
175 if (view->nr == view->nr_allocated) {
176 view->nr_allocated = MAX(2 * view->nr, 10);
177 view->ranges = qemu_realloc(view->ranges,
178 view->nr_allocated * sizeof(*view->ranges));
179 }
180 memmove(view->ranges + pos + 1, view->ranges + pos,
181 (view->nr - pos) * sizeof(FlatRange));
182 view->ranges[pos] = *range;
183 ++view->nr;
184}
185
186static void flatview_destroy(FlatView *view)
187{
188 qemu_free(view->ranges);
189}
190
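/* Two flat ranges can be merged when they are adjacent in guest physical
 * address space, belong to the same memory region, are contiguous within
 * that region, and share the same dirty logging mask.
 */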
191static bool can_merge(FlatRange *r1, FlatRange *r2)
192{
193 return addrrange_end(r1->addr) == r2->addr.start
194 && r1->mr == r2->mr
195 && r1->offset_in_region + r1->addr.size == r2->offset_in_region
196 && r1->dirty_log_mask == r2->dirty_log_mask;
197}
198
199/* Attempt to simplify a view by merging adjacent ranges */
200static void flatview_simplify(FlatView *view)
201{
202 unsigned i, j;
203
204 i = 0;
205 while (i < view->nr) {
206 j = i + 1;
207 while (j < view->nr
208 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
209 view->ranges[i].addr.size += view->ranges[j].addr.size;
210 ++j;
211 }
212 ++i;
213 memmove(&view->ranges[i], &view->ranges[j],
214 (view->nr - j) * sizeof(view->ranges[j]));
215 view->nr -= j - i;
216 }
217}
218
219static void memory_region_prepare_ram_addr(MemoryRegion *mr);
220
221static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
222{
223 ram_addr_t phys_offset, region_offset;
224
225 memory_region_prepare_ram_addr(fr->mr);
226
227 phys_offset = fr->mr->ram_addr;
228 region_offset = fr->offset_in_region;
229 /* cpu_register_physical_memory_log() wants region_offset for
230 * mmio, but prefers offsetting phys_offset for RAM. Humour it.
231 */
232 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
233 phys_offset += region_offset;
234 region_offset = 0;
235 }
236
237 cpu_register_physical_memory_log(fr->addr.start,
238 fr->addr.size,
239 phys_offset,
240 region_offset,
241 fr->dirty_log_mask);
242}
243
244static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
245{
246 cpu_register_physical_memory(fr->addr.start, fr->addr.size,
247 IO_MEM_UNASSIGNED);
248}
249
250static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
251{
252 cpu_physical_log_start(fr->addr.start, fr->addr.size);
253}
254
255static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
256{
257 cpu_physical_log_stop(fr->addr.start, fr->addr.size);
258}
259
260static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
261{
262 int r;
263
264 assert(fd->match_data && fd->addr.size == 4);
265
266 r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true);
267 if (r < 0) {
268 abort();
269 }
270}
271
272static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
273{
274 int r;
275
276 r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false);
277 if (r < 0) {
278 abort();
279 }
280}
281
282static const AddressSpaceOps address_space_ops_memory = {
283 .range_add = as_memory_range_add,
284 .range_del = as_memory_range_del,
285 .log_start = as_memory_log_start,
286 .log_stop = as_memory_log_stop,
287 .ioeventfd_add = as_memory_ioeventfd_add,
288 .ioeventfd_del = as_memory_ioeventfd_del,
289};
290
291static AddressSpace address_space_memory = {
292 .ops = &address_space_ops_memory,
293};
294
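/* Look up the old-style MemoryRegionPortio table for an entry covering
 * @offset with the requested access width and direction.  This keeps
 * devices that still use the legacy port I/O callbacks working behind
 * the MemoryRegion interface.
 */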
295static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
296 unsigned width, bool write)
297{
298 const MemoryRegionPortio *mrp;
299
300 for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
301 if (offset >= mrp->offset && offset < mrp->offset + mrp->len
302 && width == mrp->size
303 && (write ? (bool)mrp->write : (bool)mrp->read)) {
304 return mrp;
305 }
306 }
307 return NULL;
308}
309
310static void memory_region_iorange_read(IORange *iorange,
311 uint64_t offset,
312 unsigned width,
313 uint64_t *data)
314{
315 MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);
316
317 if (mr->ops->old_portio) {
318 const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);
319
320 *data = ((uint64_t)1 << (width * 8)) - 1;
321 if (mrp) {
322 *data = mrp->read(mr->opaque, offset - mrp->offset);
323 }
324 return;
325 }
326 *data = mr->ops->read(mr->opaque, offset, width);
327}
328
329static void memory_region_iorange_write(IORange *iorange,
330 uint64_t offset,
331 unsigned width,
332 uint64_t data)
333{
334 MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);
335
336 if (mr->ops->old_portio) {
337 const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);
338
339 if (mrp) {
340 mrp->write(mr->opaque, offset - mrp->offset, data);
341 }
342 return;
343 }
344 mr->ops->write(mr->opaque, offset, data, width);
345}
346
347static const IORangeOps memory_region_iorange_ops = {
348 .read = memory_region_iorange_read,
349 .write = memory_region_iorange_write,
350};
351
352static void as_io_range_add(AddressSpace *as, FlatRange *fr)
353{
354 iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
355                 fr->addr.start, fr->addr.size);
356 ioport_register(&fr->mr->iorange);
357}
358
359static void as_io_range_del(AddressSpace *as, FlatRange *fr)
360{
361 isa_unassign_ioport(fr->addr.start, fr->addr.size);
362}
363
364static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
365{
366 int r;
367
368 assert(fd->match_data && fd->addr.size == 2);
369
370 r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true);
371 if (r < 0) {
372 abort();
373 }
374}
375
376static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
377{
378 int r;
379
380 r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false);
381 if (r < 0) {
382 abort();
383 }
384}
385
386static const AddressSpaceOps address_space_ops_io = {
387 .range_add = as_io_range_add,
388 .range_del = as_io_range_del,
389 .ioeventfd_add = as_io_ioeventfd_add,
390 .ioeventfd_del = as_io_ioeventfd_del,
391};
392
393static AddressSpace address_space_io = {
394 .ops = &address_space_ops_io,
395};
396
397/* Render a memory region into the global view. Ranges in @view obscure
398 * ranges in @mr.
399 */
400static void render_memory_region(FlatView *view,
401 MemoryRegion *mr,
402 target_phys_addr_t base,
403 AddrRange clip)
404{
405 MemoryRegion *subregion;
406 unsigned i;
407 target_phys_addr_t offset_in_region;
408 uint64_t remain;
409 uint64_t now;
410 FlatRange fr;
411 AddrRange tmp;
412
413 base += mr->addr;
414
415 tmp = addrrange_make(base, mr->size);
416
417 if (!addrrange_intersects(tmp, clip)) {
418 return;
419 }
420
421 clip = addrrange_intersection(tmp, clip);
422
423 if (mr->alias) {
424 base -= mr->alias->addr;
425 base -= mr->alias_offset;
426 render_memory_region(view, mr->alias, base, clip);
427 return;
428 }
429
430 /* Render subregions in priority order. */
431 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
432 render_memory_region(view, subregion, base, clip);
433 }
434
435    if (!mr->terminates) {
436 return;
437 }
438
439 offset_in_region = clip.start - base;
440 base = clip.start;
441 remain = clip.size;
442
443 /* Render the region itself into any gaps left by the current view. */
444 for (i = 0; i < view->nr && remain; ++i) {
445 if (base >= addrrange_end(view->ranges[i].addr)) {
446 continue;
447 }
448 if (base < view->ranges[i].addr.start) {
449 now = MIN(remain, view->ranges[i].addr.start - base);
450 fr.mr = mr;
451 fr.offset_in_region = offset_in_region;
452 fr.addr = addrrange_make(base, now);
453            fr.dirty_log_mask = mr->dirty_log_mask;
454 flatview_insert(view, i, &fr);
455 ++i;
456 base += now;
457 offset_in_region += now;
458 remain -= now;
459 }
460 if (base == view->ranges[i].addr.start) {
461 now = MIN(remain, view->ranges[i].addr.size);
462 base += now;
463 offset_in_region += now;
464 remain -= now;
465 }
466 }
467 if (remain) {
468 fr.mr = mr;
469 fr.offset_in_region = offset_in_region;
470 fr.addr = addrrange_make(base, remain);
471        fr.dirty_log_mask = mr->dirty_log_mask;
472 flatview_insert(view, i, &fr);
473 }
474}
475
476/* Render a memory topology into a list of disjoint absolute ranges. */
477static FlatView generate_memory_topology(MemoryRegion *mr)
478{
479 FlatView view;
480
481 flatview_init(&view);
482
483 render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
484    flatview_simplify(&view);
485
486 return view;
487}
488
489static void address_space_add_del_ioeventfds(AddressSpace *as,
490 MemoryRegionIoeventfd *fds_new,
491 unsigned fds_new_nb,
492 MemoryRegionIoeventfd *fds_old,
493 unsigned fds_old_nb)
494{
495 unsigned iold, inew;
496
497 /* Generate a symmetric difference of the old and new fd sets, adding
498 * and deleting as necessary.
499 */
500
501 iold = inew = 0;
502 while (iold < fds_old_nb || inew < fds_new_nb) {
503 if (iold < fds_old_nb
504 && (inew == fds_new_nb
505 || memory_region_ioeventfd_before(fds_old[iold],
506 fds_new[inew]))) {
507 as->ops->ioeventfd_del(as, &fds_old[iold]);
508 ++iold;
509 } else if (inew < fds_new_nb
510 && (iold == fds_old_nb
511 || memory_region_ioeventfd_before(fds_new[inew],
512 fds_old[iold]))) {
513 as->ops->ioeventfd_add(as, &fds_new[inew]);
514 ++inew;
515 } else {
516 ++iold;
517 ++inew;
518 }
519 }
520}
521
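/* Recompute the address space's ioeventfd list from the current flat view:
 * each region-local ioeventfd is shifted to an absolute address, kept if it
 * intersects a flat range occupied by its region, and the result is diffed
 * against the previous list so that only actual changes are registered with
 * or removed from the backend.
 */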
522static void address_space_update_ioeventfds(AddressSpace *as)
523{
524 FlatRange *fr;
525 unsigned ioeventfd_nb = 0;
526 MemoryRegionIoeventfd *ioeventfds = NULL;
527 AddrRange tmp;
528 unsigned i;
529
530 FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
531 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
532 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
533 fr->addr.start - fr->offset_in_region);
534 if (addrrange_intersects(fr->addr, tmp)) {
535 ++ioeventfd_nb;
536 ioeventfds = qemu_realloc(ioeventfds,
537 ioeventfd_nb * sizeof(*ioeventfds));
538 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
539 ioeventfds[ioeventfd_nb-1].addr = tmp;
540 }
541 }
542 }
543
544 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
545 as->ioeventfds, as->ioeventfd_nb);
546
547 qemu_free(as->ioeventfds);
548 as->ioeventfds = ioeventfds;
549 as->ioeventfd_nb = ioeventfd_nb;
550}
551
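/* Bring the address space in sync with its memory region tree: render a new
 * flat view, then walk the old and new views in parallel (both are sorted by
 * address), deleting ranges that disappeared, adjusting dirty logging for
 * ranges present in both, and adding ranges that are new.
 */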
552static void address_space_update_topology(AddressSpace *as)
553{
554 FlatView old_view = as->current_map;
555 FlatView new_view = generate_memory_topology(as->root);
556 unsigned iold, inew;
557 FlatRange *frold, *frnew;
558
559 /* Generate a symmetric difference of the old and new memory maps.
560 * Kill ranges in the old map, and instantiate ranges in the new map.
561 */
562 iold = inew = 0;
563 while (iold < old_view.nr || inew < new_view.nr) {
564 if (iold < old_view.nr) {
565 frold = &old_view.ranges[iold];
566 } else {
567 frold = NULL;
568 }
569 if (inew < new_view.nr) {
570 frnew = &new_view.ranges[inew];
571 } else {
572 frnew = NULL;
573 }
574
575 if (frold
576 && (!frnew
577 || frold->addr.start < frnew->addr.start
578 || (frold->addr.start == frnew->addr.start
579 && !flatrange_equal(frold, frnew)))) {
580 /* In old, but (not in new, or in new but attributes changed). */
581
582            as->ops->range_del(as, frold);
583 ++iold;
584 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
585 /* In both (logging may have changed) */
586
587            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
588                as->ops->log_stop(as, frnew);
589            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
590                as->ops->log_start(as, frnew);
591 }
592
593 ++iold;
594 ++inew;
595 } else {
596 /* In new */
597
598            as->ops->range_add(as, frnew);
599 ++inew;
600 }
601 }
602    as->current_map = new_view;
603    flatview_destroy(&old_view);
604    address_space_update_ioeventfds(as);
605}
606
607static void memory_region_update_topology(void)
608{
609 if (address_space_memory.root) {
610 address_space_update_topology(&address_space_memory);
611 }
612 if (address_space_io.root) {
613 address_space_update_topology(&address_space_io);
614 }
615}
616
617void memory_region_init(MemoryRegion *mr,
618 const char *name,
619 uint64_t size)
620{
621 mr->ops = NULL;
622 mr->parent = NULL;
623 mr->size = size;
624 mr->addr = 0;
625 mr->offset = 0;
626    mr->terminates = false;
627 mr->priority = 0;
628 mr->may_overlap = false;
629 mr->alias = NULL;
630 QTAILQ_INIT(&mr->subregions);
631 memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
632 QTAILQ_INIT(&mr->coalesced);
633 mr->name = qemu_strdup(name);
634    mr->dirty_log_mask = 0;
635 mr->ioeventfd_nb = 0;
636 mr->ioeventfds = NULL;
637}
638
639static bool memory_region_access_valid(MemoryRegion *mr,
640 target_phys_addr_t addr,
641 unsigned size)
642{
643 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
644 return false;
645 }
646
647    /* For compatibility, treat a zero max_access_size as meaning all sizes are valid */
648 if (!mr->ops->valid.max_access_size) {
649 return true;
650 }
651
652 if (size > mr->ops->valid.max_access_size
653 || size < mr->ops->valid.min_access_size) {
654 return false;
655 }
656 return true;
657}
658
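/* Adapt a MemoryRegionOps read callback to the legacy fixed-width
 * CPUReadMemoryFunc interface: validate the access, then split it into
 * pieces that respect the implementation's min/max access sizes and
 * reassemble the result (little-endian only for now, see FIXME below).
 */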
659static uint32_t memory_region_read_thunk_n(void *_mr,
660 target_phys_addr_t addr,
661 unsigned size)
662{
663 MemoryRegion *mr = _mr;
664 unsigned access_size, access_size_min, access_size_max;
665 uint64_t access_mask;
666 uint32_t data = 0, tmp;
667 unsigned i;
668
669 if (!memory_region_access_valid(mr, addr, size)) {
670 return -1U; /* FIXME: better signalling */
671 }
672
673 if (!mr->ops->read) {
674 return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
675 }
676
677 /* FIXME: support unaligned access */
678
679 access_size_min = mr->ops->impl.min_access_size;
680 if (!access_size_min) {
681 access_size_min = 1;
682 }
683 access_size_max = mr->ops->impl.max_access_size;
684 if (!access_size_max) {
685 access_size_max = 4;
686 }
687 access_size = MAX(MIN(size, access_size_max), access_size_min);
688 access_mask = -1ULL >> (64 - access_size * 8);
689 addr += mr->offset;
690 for (i = 0; i < size; i += access_size) {
691 /* FIXME: big-endian support */
692 tmp = mr->ops->read(mr->opaque, addr + i, access_size);
693 data |= (tmp & access_mask) << (i * 8);
694 }
695
696 return data;
697}
698
699static void memory_region_write_thunk_n(void *_mr,
700 target_phys_addr_t addr,
701 unsigned size,
702 uint64_t data)
703{
704 MemoryRegion *mr = _mr;
705 unsigned access_size, access_size_min, access_size_max;
706 uint64_t access_mask;
707 unsigned i;
708
709 if (!memory_region_access_valid(mr, addr, size)) {
710 return; /* FIXME: better signalling */
711 }
712
713 if (!mr->ops->write) {
714 mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
715 return;
716 }
717
718 /* FIXME: support unaligned access */
719
720 access_size_min = mr->ops->impl.min_access_size;
721 if (!access_size_min) {
722 access_size_min = 1;
723 }
724 access_size_max = mr->ops->impl.max_access_size;
725 if (!access_size_max) {
726 access_size_max = 4;
727 }
728 access_size = MAX(MIN(size, access_size_max), access_size_min);
729 access_mask = -1ULL >> (64 - access_size * 8);
730 addr += mr->offset;
731 for (i = 0; i < size; i += access_size) {
732 /* FIXME: big-endian support */
733 mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
734 access_size);
735 }
736}
737
738static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
739{
740 return memory_region_read_thunk_n(mr, addr, 1);
741}
742
743static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
744{
745 return memory_region_read_thunk_n(mr, addr, 2);
746}
747
748static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
749{
750 return memory_region_read_thunk_n(mr, addr, 4);
751}
752
753static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
754 uint32_t data)
755{
756 memory_region_write_thunk_n(mr, addr, 1, data);
757}
758
759static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
760 uint32_t data)
761{
762 memory_region_write_thunk_n(mr, addr, 2, data);
763}
764
765static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
766 uint32_t data)
767{
768 memory_region_write_thunk_n(mr, addr, 4, data);
769}
770
771static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
772 memory_region_read_thunk_b,
773 memory_region_read_thunk_w,
774 memory_region_read_thunk_l,
775};
776
777static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
778 memory_region_write_thunk_b,
779 memory_region_write_thunk_w,
780 memory_region_write_thunk_l,
781};
782
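/* Lazily register an I/O region with the backend.  MMIO regions get their
 * ram_addr from cpu_register_io_memory() the first time they are actually
 * mapped, via as_memory_range_add(); RAM regions set backend_registered at
 * init time and are skipped here.
 */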
783static void memory_region_prepare_ram_addr(MemoryRegion *mr)
784{
785 if (mr->backend_registered) {
786 return;
787 }
788
789 mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
790 memory_region_write_thunk,
791 mr,
792 mr->ops->endianness);
793 mr->backend_registered = true;
794}
795
796void memory_region_init_io(MemoryRegion *mr,
797 const MemoryRegionOps *ops,
798 void *opaque,
799 const char *name,
800 uint64_t size)
801{
802 memory_region_init(mr, name, size);
803 mr->ops = ops;
804 mr->opaque = opaque;
805    mr->terminates = true;
806    mr->backend_registered = false;
807}
808
809void memory_region_init_ram(MemoryRegion *mr,
810 DeviceState *dev,
811 const char *name,
812 uint64_t size)
813{
814 memory_region_init(mr, name, size);
815    mr->terminates = true;
816    mr->ram_addr = qemu_ram_alloc(dev, name, size);
817    mr->backend_registered = true;
818}
819
820void memory_region_init_ram_ptr(MemoryRegion *mr,
821 DeviceState *dev,
822 const char *name,
823 uint64_t size,
824 void *ptr)
825{
826 memory_region_init(mr, name, size);
827    mr->terminates = true;
828    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
829    mr->backend_registered = true;
830}
831
832void memory_region_init_alias(MemoryRegion *mr,
833 const char *name,
834 MemoryRegion *orig,
835 target_phys_addr_t offset,
836 uint64_t size)
837{
838 memory_region_init(mr, name, size);
839 mr->alias = orig;
840 mr->alias_offset = offset;
841}
842
843void memory_region_destroy(MemoryRegion *mr)
844{
845 assert(QTAILQ_EMPTY(&mr->subregions));
846 memory_region_clear_coalescing(mr);
847 qemu_free((char *)mr->name);
848    qemu_free(mr->ioeventfds);
849}
850
851uint64_t memory_region_size(MemoryRegion *mr)
852{
853 return mr->size;
854}
855
856void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
857{
858 mr->offset = offset;
859}
860
861void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
862{
863 uint8_t mask = 1 << client;
864
865 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
866 memory_region_update_topology();
867}
868
869bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
870 unsigned client)
871{
872    assert(mr->terminates);
873    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
874}
875
876void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
877{
878    assert(mr->terminates);
879    return cpu_physical_memory_set_dirty(mr->ram_addr + addr);
880}
881
882void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
883{
884 FlatRange *fr;
885
886    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
887 if (fr->mr == mr) {
888 cpu_physical_sync_dirty_bitmap(fr->addr.start,
889 fr->addr.start + fr->addr.size);
890 }
891 }
892}
893
894void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
895{
896 /* FIXME */
897}
898
899void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
900 target_phys_addr_t size, unsigned client)
901{
902    assert(mr->terminates);
903 cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
904 mr->ram_addr + addr + size,
905 1 << client);
906}
907
908void *memory_region_get_ram_ptr(MemoryRegion *mr)
909{
910 if (mr->alias) {
911 return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
912 }
913
914    assert(mr->terminates);
915
916 return qemu_get_ram_ptr(mr->ram_addr);
917}
918
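/* Re-register coalesced MMIO after a change: for every flat range backed by
 * @mr, drop the old registration and register the intersection of each
 * region-local coalesced range (translated to absolute addresses) with that
 * flat range.
 */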
919static void memory_region_update_coalesced_range(MemoryRegion *mr)
920{
921 FlatRange *fr;
922 CoalescedMemoryRange *cmr;
923 AddrRange tmp;
924
925    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
926 if (fr->mr == mr) {
927 qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
928 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
929 tmp = addrrange_shift(cmr->addr,
930 fr->addr.start - fr->offset_in_region);
931 if (!addrrange_intersects(tmp, fr->addr)) {
932 continue;
933 }
934 tmp = addrrange_intersection(tmp, fr->addr);
935 qemu_register_coalesced_mmio(tmp.start, tmp.size);
936 }
937 }
938 }
939}
940
941void memory_region_set_coalescing(MemoryRegion *mr)
942{
943 memory_region_clear_coalescing(mr);
944 memory_region_add_coalescing(mr, 0, mr->size);
945}
946
947void memory_region_add_coalescing(MemoryRegion *mr,
948 target_phys_addr_t offset,
949 uint64_t size)
950{
951 CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));
952
953 cmr->addr = addrrange_make(offset, size);
954 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
955 memory_region_update_coalesced_range(mr);
956}
957
958void memory_region_clear_coalescing(MemoryRegion *mr)
959{
960 CoalescedMemoryRange *cmr;
961
962 while (!QTAILQ_EMPTY(&mr->coalesced)) {
963 cmr = QTAILQ_FIRST(&mr->coalesced);
964 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
965 qemu_free(cmr);
966 }
967 memory_region_update_coalesced_range(mr);
968}
969
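/* Insert an ioeventfd into the region's array, keeping it sorted by
 * memory_region_ioeventfd_before(), then update the topology so the
 * address space backends (e.g. kvm) see the new eventfd.
 */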
970void memory_region_add_eventfd(MemoryRegion *mr,
971 target_phys_addr_t addr,
972 unsigned size,
973 bool match_data,
974 uint64_t data,
975 int fd)
976{
977 MemoryRegionIoeventfd mrfd = {
978 .addr.start = addr,
979 .addr.size = size,
980 .match_data = match_data,
981 .data = data,
982 .fd = fd,
983 };
984 unsigned i;
985
986 for (i = 0; i < mr->ioeventfd_nb; ++i) {
987 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
988 break;
989 }
990 }
991 ++mr->ioeventfd_nb;
992 mr->ioeventfds = qemu_realloc(mr->ioeventfds,
993 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
994 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
995 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
996 mr->ioeventfds[i] = mrfd;
997 memory_region_update_topology();
998}
999
1000void memory_region_del_eventfd(MemoryRegion *mr,
1001 target_phys_addr_t addr,
1002 unsigned size,
1003 bool match_data,
1004 uint64_t data,
1005 int fd)
1006{
1007 MemoryRegionIoeventfd mrfd = {
1008 .addr.start = addr,
1009 .addr.size = size,
1010 .match_data = match_data,
1011 .data = data,
1012 .fd = fd,
1013 };
1014 unsigned i;
1015
1016 for (i = 0; i < mr->ioeventfd_nb; ++i) {
1017 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
1018 break;
1019 }
1020 }
1021 assert(i != mr->ioeventfd_nb);
1022 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
1023 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
1024 --mr->ioeventfd_nb;
1025 mr->ioeventfds = qemu_realloc(mr->ioeventfds,
1026 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
1027 memory_region_update_topology();
1028}
1029
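/* Common part of adding a subregion: link it into the parent's list, which
 * is kept sorted by descending priority so that render_memory_region() can
 * simply walk it in order, and warn about unexpected overlaps.
 */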
1030static void memory_region_add_subregion_common(MemoryRegion *mr,
1031 target_phys_addr_t offset,
1032 MemoryRegion *subregion)
1033{
1034 MemoryRegion *other;
1035
1036 assert(!subregion->parent);
1037 subregion->parent = mr;
1038 subregion->addr = offset;
1039 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
1040 if (subregion->may_overlap || other->may_overlap) {
1041 continue;
1042 }
1043        if (offset >= other->addr + other->size
1044            || offset + subregion->size <= other->addr) {
1045 continue;
1046 }
1047 printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
1048 (unsigned long long)offset,
1049 (unsigned long long)subregion->size,
1050               (unsigned long long)other->addr,
1051 (unsigned long long)other->size);
1052 }
1053 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
1054 if (subregion->priority >= other->priority) {
1055 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
1056 goto done;
1057 }
1058 }
1059 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
1060done:
1061 memory_region_update_topology();
1062}
1063
1064
1065void memory_region_add_subregion(MemoryRegion *mr,
1066 target_phys_addr_t offset,
1067 MemoryRegion *subregion)
1068{
1069 subregion->may_overlap = false;
1070 subregion->priority = 0;
1071 memory_region_add_subregion_common(mr, offset, subregion);
1072}
1073
1074void memory_region_add_subregion_overlap(MemoryRegion *mr,
1075 target_phys_addr_t offset,
1076 MemoryRegion *subregion,
1077 unsigned priority)
1078{
1079 subregion->may_overlap = true;
1080 subregion->priority = priority;
1081 memory_region_add_subregion_common(mr, offset, subregion);
1082}
1083
1084void memory_region_del_subregion(MemoryRegion *mr,
1085 MemoryRegion *subregion)
1086{
1087 assert(subregion->parent == mr);
1088 subregion->parent = NULL;
1089 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
1090 memory_region_update_topology();
1091}
1092
1093void set_system_memory_map(MemoryRegion *mr)
1094{
1095    address_space_memory.root = mr;
1096 memory_region_update_topology();
1097}
1098
1099void set_system_io_map(MemoryRegion *mr)
1100{
1101 address_space_io.root = mr;
1102 memory_region_update_topology();
1103}
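
/* Illustrative usage sketch, not part of this file: how a board model might
 * build a hierarchy with this API.  The names board_init, sysmem, ram, mmio
 * and mmio_ops below are hypothetical and exist only for this example.
 *
 *     static MemoryRegion sysmem, ram, mmio;
 *     static const MemoryRegionOps mmio_ops = { ... };
 *
 *     static void board_init(void)
 *     {
 *         memory_region_init(&sysmem, "system", UINT64_MAX);
 *         memory_region_init_ram(&ram, NULL, "ram", 64 * 1024 * 1024);
 *         memory_region_init_io(&mmio, &mmio_ops, NULL, "board-mmio", 0x1000);
 *         memory_region_add_subregion(&sysmem, 0, &ram);
 *         memory_region_add_subregion(&sysmem, 0x10000000, &mmio);
 *         set_system_memory_map(&sysmem);
 *     }
 */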