/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
* and update our entry so we can skip it and go directly to the destination.
*/
-static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
+static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
unsigned valid_ptr = P_L2_SIZE;
int valid = 0;
valid_ptr = i;
valid++;
if (p[i].skip) {
- phys_page_compact(&p[i], nodes, compacted);
+ phys_page_compact(&p[i], nodes);
}
}
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
- DECLARE_BITMAP(compacted, nodes_nb);
-
if (d->phys_map.skip) {
- phys_page_compact(&d->phys_map, d->map.nodes, compacted);
+ phys_page_compact(&d->phys_map, d->map.nodes);
}
}
char *c;
void *area = MAP_FAILED;
int fd = -1;
- int64_t page_size;
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_setg(errp,
*/
}
- page_size = qemu_fd_getpagesize(fd);
- block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
+ block->page_size = qemu_fd_getpagesize(fd);
+ block->mr->align = MAX(block->page_size, QEMU_VMALLOC_ALIGN);
- if (memory < page_size) {
+ if (memory < block->page_size) {
error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
- "or larger than page size 0x%" PRIx64,
- memory, page_size);
+ "or larger than page size 0x%zx",
+ memory, block->page_size);
goto error;
}
- memory = ROUND_UP(memory, page_size);
+ memory = ROUND_UP(memory, block->page_size);
/*
* ftruncate is not supported by hugetlbfs in older
}
}
+size_t qemu_ram_pagesize(RAMBlock *rb)
+{
+ return rb->page_size;
+}
+
static int memory_try_enable_merging(void *addr, size_t len)
{
if (!machine_mem_merge(current_machine)) {
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
+ new_block->page_size = getpagesize();
new_block->host = host;
if (host) {
new_block->flags |= RAM_PREALLOC;