phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
+/*
+ * The range in *section* may look like this:
+ *
+ * |s|PPPPPPP|s|
+ *
+ * where s stands for subpage and P for page.
+ */
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
- MemoryRegionSection now = *section, remain = *section;
+ MemoryRegionSection remain = *section;
Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
- if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
- uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
- - now.offset_within_address_space;
+ /* register first subpage */
+ if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
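+ /* bytes from the section start up to the next target-page boundary */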
+ uint64_t left = TARGET_PAGE_ALIGN(remain.offset_within_address_space)
+ - remain.offset_within_address_space;
+ MemoryRegionSection now = remain;
now.size = int128_min(int128_make64(left), now.size);
register_subpage(fv, &now);
- } else {
- now.size = int128_zero();
- }
- while (int128_ne(remain.size, now.size)) {
+ if (int128_eq(remain.size, now.size)) {
+ return;
+ }
remain.size = int128_sub(remain.size, now.size);
remain.offset_within_address_space += int128_get64(now.size);
remain.offset_within_region += int128_get64(now.size);
- now = remain;
- if (int128_lt(remain.size, page_size)) {
- register_subpage(fv, &now);
- } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
- now.size = page_size;
- register_subpage(fv, &now);
- } else {
- now.size = int128_and(now.size, int128_neg(page_size));
- register_multipage(fv, &now);
+ }
+
+ /* register whole pages */
+ if (int128_ge(remain.size, page_size)) {
+ MemoryRegionSection now = remain;
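+ /* int128_neg(page_size) == ~(page_size - 1): round size down to whole pages */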
+ now.size = int128_and(now.size, int128_neg(page_size));
+ register_multipage(fv, &now);
+ if (int128_eq(remain.size, now.size)) {
+ return;
}
+ remain.size = int128_sub(remain.size, now.size);
+ remain.offset_within_address_space += int128_get64(now.size);
+ remain.offset_within_region += int128_get64(now.size);
}
+
+ /* register last subpage */
+ register_subpage(fv, &remain);
}
void qemu_flush_coalesced_mmio_buffer(void)
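The refactored function registers at most three pieces: an unaligned head subpage, a run of whole pages, and an unaligned tail subpage. For intuition, here is a minimal standalone sketch of the same head/body/tail arithmetic on plain 64-bit integers, assuming 4 KiB target pages; split_range() and its output are illustrative only, not QEMU API:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE ((uint64_t)4096)
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Illustrative mirror of flatview_add_to_dispatch()'s splitting. */
    static void split_range(uint64_t addr, uint64_t size)
    {
        /* head: partial page up to the first page boundary */
        if (addr & ~PAGE_MASK) {
            uint64_t left = ((addr + PAGE_SIZE - 1) & PAGE_MASK) - addr;
            uint64_t head = left < size ? left : size;

            printf("subpage   0x%" PRIx64 " + 0x%" PRIx64 "\n", addr, head);
            addr += head;
            size -= head;
        }
        /* body: size rounded down to a whole number of pages */
        if (size >= PAGE_SIZE) {
            uint64_t body = size & PAGE_MASK;

            printf("multipage 0x%" PRIx64 " + 0x%" PRIx64 "\n", addr, body);
            addr += body;
            size -= body;
        }
        /* tail: partial page at the end, if anything remains */
        if (size) {
            printf("subpage   0x%" PRIx64 " + 0x%" PRIx64 "\n", addr, size);
        }
    }

For example, split_range(0x1800, 0x3000) yields a 0x800 head subpage at 0x1800, a 0x2000 multipage body at 0x2000 and a 0x800 tail subpage at 0x4000: the |s|PPPPPPP|s| picture from the comment. The early returns in the real code correspond to size reaching zero here.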
@@ ... @@
long *hpsize_min = opaque;
if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
- long hpsize = host_memory_backend_pagesize(MEMORY_BACKEND(obj));
+ HostMemoryBackend *backend = MEMORY_BACKEND(obj);
+ long hpsize = host_memory_backend_pagesize(backend);
- if (hpsize < *hpsize_min) {
+ if (host_memory_backend_is_mapped(backend) && (hpsize < *hpsize_min)) {
*hpsize_min = hpsize;
}
}
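With the host_memory_backend_is_mapped() check, a backend that has been created but never mapped into the guest no longer drags the reported page size down. The callback is meant to be driven over the /objects container with a LONG_MAX sentinel, along these lines; a sketch where find_min_backend_pagesize stands in for the callback above and the wrapper name is invented (object_resolve_path() and object_child_foreach() are real QOM APIs):

    #include <limits.h>

    /* Sketch of a driver for the callback above; names are placeholders. */
    static long smallest_mapped_backend_pagesize(void)
    {
        long hpsize = LONG_MAX;  /* sentinel: no mapped backend seen yet */
        Object *memdev_root = object_resolve_path("/objects", NULL);

        if (memdev_root) {
            /* invokes the callback once per child of /objects */
            object_child_foreach(memdev_root, find_min_backend_pagesize,
                                 &hpsize);
        }
        return hpsize;
    }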
@@ ... @@ const char *qemu_ram_get_idstr(RAMBlock *rb)
return rb->idstr;
}
+void *qemu_ram_get_host_addr(RAMBlock *rb)
+{
+ return rb->host;
+}
+
+ram_addr_t qemu_ram_get_offset(RAMBlock *rb)
+{
+ return rb->offset;
+}
+
+ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
+{
+ return rb->used_length;
+}
+
bool qemu_ram_is_shared(RAMBlock *rb)
{
return rb->flags & RAM_SHARED;
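The new qemu_ram_get_*() getters let code outside exec.c read a RAMBlock's fields without reaching into the struct. A hypothetical consumer, using only the getters added here plus qemu_ram_get_idstr() from just above:

    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical helper: describe a block purely through accessors. */
    static void print_ram_block(RAMBlock *rb)
    {
        printf("%s: host %p, offset 0x%" PRIx64 ", used 0x%" PRIx64 "\n",
               qemu_ram_get_idstr(rb),
               qemu_ram_get_host_addr(rb),
               (uint64_t)qemu_ram_get_offset(rb),
               (uint64_t)qemu_ram_get_used_length(rb));
    }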
@@ ... @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
rcu_read_lock();
RAMBLOCK_FOREACH(block) {
- ret = func(block->idstr, block->host, block->offset,
- block->used_length, opaque);
- if (ret) {
- break;
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
-int qemu_ram_foreach_migratable_block(RAMBlockIterFunc func, void *opaque)
-{
- RAMBlock *block;
- int ret = 0;
-
- rcu_read_lock();
- RAMBLOCK_FOREACH(block) {
- if (!qemu_ram_is_migratable(block)) {
- continue;
- }
- ret = func(block->idstr, block->host, block->offset,
- block->used_length, opaque);
+ ret = func(block, opaque);
if (ret) {
break;
}
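With RAMBlockIterFunc now taking the RAMBlock itself, a callback pulls out only the fields it needs via the getters, and a non-zero return stops the walk, as the loop above shows. A sketch with an invented callback name:

    /* Illustrative callback: sum the used length of every block. */
    static int count_ram_bytes(RAMBlock *rb, void *opaque)
    {
        uint64_t *total = opaque;

        *total += qemu_ram_get_used_length(rb);
        return 0;  /* non-zero would abort the iteration */
    }

    /* caller side */
    uint64_t total = 0;
    qemu_ram_foreach_block(count_ram_bytes, &total);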