#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
+#include "exec/ramlist.h"
struct RAMBlock {
struct rcu_head rcu;
char idstr[256];
/* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
int fd;
+ size_t page_size;
+ /* dirty bitmap used during migration */
+ unsigned long *bmap;
+    /* Bitmap of pages that haven't been sent even once; currently
+     * only maintained and used in postcopy, where it is used to send
+     * the dirty bitmap at the start of the postcopy phase.
+     */
+ unsigned long *unsentmap;
+ /* bitmap of already received pages in postcopy */
+ unsigned long *receivedmap;
};
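+
+/* Illustrative sketch, not part of this header: migration code is
+ * expected to size the per-block dirty bitmap in guest pages before
+ * using it.  Assuming the block's max_length field (elided from this
+ * hunk), setup could look like:
+ *
+ *     unsigned long pages = rb->max_length >> TARGET_PAGE_BITS;
+ *     rb->bmap = bitmap_new(pages);
+ *     bitmap_set(rb->bmap, 0, pages);
+ */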
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b->offset <= offset) && (offset < b->offset + b->used_length);
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
-/* The dirty memory bitmap is split into fixed-size blocks to allow growth
- * under RCU. The bitmap for a block can be accessed as follows:
- *
- * rcu_read_lock();
- *
- * DirtyMemoryBlocks *blocks =
- * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
- *
- * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
- * unsigned long *block = blocks.blocks[idx];
- * ...access block bitmap...
- *
- * rcu_read_unlock();
+static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
+ RAMBlock *rb)
+{
+ uint64_t host_addr_offset =
+ (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
+ return host_addr_offset >> TARGET_PAGE_BITS;
+}
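+
+/* Usage sketch, hedged: the incoming postcopy path would mark a page
+ * in @receivedmap once it has been placed, along the lines of:
+ *
+ *     set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb),
+ *                    rb->receivedmap);
+ *
+ * set_bit_atomic() is from qemu/bitops.h; the caller shown here is an
+ * assumption, not something this header mandates.
+ */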
+
+long qemu_getrampagesize(void);
+
+/**
+ * qemu_ram_alloc_from_file,
+ * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
+ * file or device
*
- * Remember to check for the end of the block when accessing a range of
- * addresses. Move on to the next block if you reach the end.
+ * Parameters:
+ * @size: the size in bytes of the ram block
+ * @mr: the memory region where the ram block is
+ * @ram_flags: specify the properties of the ram block, which can be a
+ *             bit-or combination of the following values:
+ *             - RAM_SHARED: mmap the backing file or device with MAP_SHARED
+ *             Other bits are ignored.
+ * @mem_path or @fd: specify the backing file or device
+ * @errp: pointer to Error*, to store an error if one occurs
*
- * Organization into blocks allows dirty memory to grow (but not shrink) under
- * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
- * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
- * the same. Other threads can safely access existing blocks while dirty
- * memory is being grown. When no threads are using the old DirtyMemoryBlocks
- * anymore it is freed by RCU (but the underlying blocks stay because they are
- * pointed to from the new DirtyMemoryBlocks).
+ * Return:
+ * On success, return a pointer to the ram block.
+ * On failure, return NULL.
*/
-#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
-typedef struct {
- struct rcu_head rcu;
- unsigned long *blocks[];
-} DirtyMemoryBlocks;
-
-typedef struct RAMList {
- QemuMutex mutex;
- RAMBlock *mru_block;
- /* RCU-enabled, writes protected by the ramlist lock. */
- QLIST_HEAD(, RAMBlock) blocks;
- DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
- uint32_t version;
-} RAMList;
-extern RAMList ram_list;
-
-ram_addr_t last_ram_offset(void);
-void qemu_mutex_lock_ramlist(void);
-void qemu_mutex_unlock_ramlist(void);
-
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
- bool share, const char *mem_path,
+ uint32_t ram_flags, const char *mem_path,
Error **errp);
+RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
+ uint32_t ram_flags, int fd,
+ Error **errp);
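+
+/* Caller sketch, illustrative only: allocating a shared file-backed
+ * block might look like the following, assuming the caller owns @mr
+ * and reports errors itself (the path is a placeholder):
+ *
+ *     Error *err = NULL;
+ *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
+ *                                             "/dev/shm/guest-ram", &err);
+ *     if (!rb) {
+ *         error_report_err(err);
+ *     }
+ */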
+
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp);
-RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
+ Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
uint64_t length,
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
+void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
+
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
rcu_read_unlock();
- xen_modified_memory(start, length);
+ xen_hvm_modified_memory(start, length);
}
#if !defined(_WIN32)
rcu_read_unlock();
- xen_modified_memory(start, pages << TARGET_PAGE_BITS);
+ xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
/*
ram_addr_t length,
unsigned client);
+DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
+ (ram_addr_t start, ram_addr_t length, unsigned client);
+
+bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
+ ram_addr_t start,
+ ram_addr_t length);
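+
+/* Hedged usage sketch: a consumer (display code, say) would take a
+ * snapshot, test pages against it, then free it:
+ *
+ *     DirtyBitmapSnapshot *snap =
+ *         cpu_physical_memory_snapshot_and_clear_dirty(start, length,
+ *                                                      DIRTY_MEMORY_VGA);
+ *     for (a = start; a < start + length; a += TARGET_PAGE_SIZE) {
+ *         if (cpu_physical_memory_snapshot_get_dirty(snap, a,
+ *                                                    TARGET_PAGE_SIZE)) {
+ *             ... redraw this page ...
+ *         }
+ *     }
+ *     g_free(snap);
+ *
+ * Freeing with g_free() assumes the snapshot is a flat allocation;
+ * check the implementation before relying on that.
+ */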
+
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
ram_addr_t length)
{
static inline
-uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
+uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t start,
- ram_addr_t length)
+ ram_addr_t length,
+ uint64_t *real_dirty_pages)
{
ram_addr_t addr;
- unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+ unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
uint64_t num_dirty = 0;
+ unsigned long *dest = rb->bmap;
- /* start address is aligned at the start of a word? */
- if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+    /* start address and length are aligned at the start of a word? */
+ if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
+ (start + rb->offset) &&
+ !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long * const *src;
- unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
+ unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
rcu_read_lock();
if (src[idx][offset]) {
unsigned long bits = atomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
+ *real_dirty_pages += ctpopl(bits);
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
rcu_read_unlock();
} else {
+ ram_addr_t offset = rb->offset;
+
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
- start + addr,
+ start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
+ *real_dirty_pages += 1;
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;
return num_dirty;
}
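+
+/* Caller sketch, hedged: migration's bitmap sync is expected to walk
+ * each block and accumulate the result, roughly:
+ *
+ *     uint64_t real_dirty = 0;
+ *     migration_dirty_pages +=
+ *         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
+ *                                               &real_dirty);
+ *
+ * migration_dirty_pages, real_dirty and the use of used_length are
+ * illustrative names and assumptions, not part of this interface.
+ */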
-
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif
#endif