#define RAM_ADDR_H
#ifndef CONFIG_USER_ONLY
+#include "cpu.h"
#include "hw/xen/xen.h"
+#include "sysemu/tcg.h"
#include "exec/ramlist.h"
struct RAMBlock {
struct rcu_head rcu;
struct MemoryRegion *mr;
uint8_t *host;
+ uint8_t *colo_cache; /* For colo, VM's ram cache */
ram_addr_t offset;
ram_addr_t used_length;
ram_addr_t max_length;
size_t page_size;
/* dirty bitmap used during migration */
unsigned long *bmap;
- /* bitmap of pages that haven't been sent even once
- * only maintained and used in postcopy at the moment
- * where it's used to send the dirtymap at the start
- * of the postcopy phase
+ /* bitmap of already received pages in postcopy */
+ unsigned long *receivedmap;
+
+ /*
+ * bitmap to track the already-cleared dirty bitmap. When a bit is
+ * set, it means the corresponding memory chunk needs a log-clear.
+ * Set this to non-NULL to enable postponing and splitting the
+ * clearing of the dirty bitmap on the remote node (e.g., KVM). The
+ * bitmap is only set when doing a global sync.
+ *
+ * NOTE: this bitmap is different from the other bitmaps in that one
+ * bit can represent multiple guest pages (as determined by the
+ * `clear_bmap_shift' variable below). On the destination side, this
+ * should always be NULL, and the variable
+ * `clear_bmap_shift' is meaningless.
*/
- unsigned long *unsentmap;
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
};
+/**
+ * clear_bmap_size: calculate clear bitmap size
+ *
+ * @pages: number of guest pages
+ * @shift: guest page number shift
+ *
+ * Returns: number of bits for the clear bitmap
+ */
+static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
+{
+ return DIV_ROUND_UP(pages, 1UL << shift);
+}
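+
+/*
+ * Worked example (illustrative only, not part of the tree): for a
+ * 4 GiB guest with 4 KiB pages (0x100000 pages) and a shift of 18,
+ * each bit covers 2^18 pages (1 GiB), so clear_bmap_size(0x100000, 18)
+ * == DIV_ROUND_UP(0x100000, 0x40000) == 4 bits.
+ */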
+
+/**
+ * clear_bmap_set: set clear bitmap for the page range
+ *
+ * @rb: the ramblock to operate on
+ * @start: the start page number
+ * @npages: number of pages to set in the bitmap
+ *
+ * Returns: None
+ */
+static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
+ uint64_t npages)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ bitmap_set_atomic(rb->clear_bmap, start >> shift,
+ clear_bmap_size(npages, shift));
+}
+
+/**
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ *
+ * @rb: the ramblock to operate on
+ * @page: the page number to check
+ *
+ * Returns: true if the bit was set, false otherwise
+ */
+static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+}
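+
+/*
+ * Hypothetical usage sketch (the caller below is illustrative, not
+ * part of this header): the sync path marks whole chunks as pending
+ * with clear_bmap_set(), and the send path clears the remote dirty
+ * log lazily, one covered chunk at a time:
+ *
+ *   if (clear_bmap_test_and_clear(rb, page)) {
+ *       uint8_t shift = rb->clear_bmap_shift;
+ *       hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
+ *       hwaddr start = ((ram_addr_t)page << TARGET_PAGE_BITS) &
+ *                      ~(size - 1);
+ *       memory_region_clear_dirty_bitmap(rb->mr, start, size);
+ *   }
+ */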
+
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
return (b && b->host && offset < b->used_length) ? true : false;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
-long qemu_getrampagesize(void);
-unsigned long last_ram_page(void);
+static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
+ RAMBlock *rb)
+{
+ uint64_t host_addr_offset =
+ (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
+ return host_addr_offset >> TARGET_PAGE_BITS;
+}
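+
+/*
+ * For example (illustrative, assuming 4 KiB target pages): a
+ * host_addr 0x3000 bytes past rb->host yields offset 3, i.e. bit 3
+ * of rb->receivedmap.
+ */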
+
+bool ramblock_is_pmem(RAMBlock *rb);
+
+long qemu_minrampagesize(void);
+long qemu_maxrampagesize(void);
+
+/**
+ * qemu_ram_alloc_from_file,
+ * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
+ * file or device
+ *
+ * Parameters:
+ * @size: the size in bytes of the ram block
+ * @mr: the memory region where the ram block is
+ * @ram_flags: specify the properties of the ram block, which can be
+ * one of the following values, or a bit-or of them:
+ * - RAM_SHARED: mmap the backing file or device with MAP_SHARED
+ * - RAM_PMEM: the backend @mem_path or @fd is persistent memory
+ * Other bits are ignored.
+ * @mem_path or @fd: specify the backing file or device
+ * @errp: pointer to Error*, to store an error if it happens
+ *
+ * Return:
+ * On success, return a pointer to the ram block.
+ * On failure, return NULL.
+ */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
- bool share, const char *mem_path,
+ uint32_t ram_flags, const char *mem_path,
Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
- bool share, int fd,
+ uint32_t ram_flags, int fd,
Error **errp);
+
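+/*
+ * Minimal usage sketch (hypothetical caller; error handling follows
+ * the usual Error ** convention): allocate a shared block backed by
+ * persistent memory from an already-open file descriptor.
+ *
+ *   Error *local_err = NULL;
+ *   RAMBlock *rb = qemu_ram_alloc_from_fd(size, mr,
+ *                                         RAM_SHARED | RAM_PMEM,
+ *                                         fd, &local_err);
+ *   if (!rb) {
+ *       error_propagate(errp, local_err);
+ *   }
+ */
+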
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp);
-RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
+ Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
uint64_t length,
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
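+
+/*
+ * Illustrative note: these masks are passed as the client mask to
+ * the dirty-tracking helpers below, e.g. marking a range dirty for
+ * every client except DIRTY_MEMORY_CODE (when TCG self-modifying
+ * code tracking is not needed):
+ *
+ *   cpu_physical_memory_set_dirty_range(addr, size,
+ *                                       DIRTY_CLIENTS_NOCODE);
+ */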
+void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
+
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
-
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ WITH_RCU_READ_LOCK_GUARD() {
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_bit(blocks->blocks[idx],
+ num, offset);
+ if (found < num) {
+ dirty = true;
+ break;
+ }
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- unsigned long num = next - base;
- unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
- if (found < num) {
- dirty = true;
- break;
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
}
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
}
- rcu_read_unlock();
-
return dirty;
}
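+
+/*
+ * Note on the RCU guards introduced above (semantics per
+ * include/qemu/rcu.h): RCU_READ_LOCK_GUARD() keeps the RCU read lock
+ * held until the end of the enclosing scope, while
+ * WITH_RCU_READ_LOCK_GUARD() { ... } releases it at the closing
+ * brace of the attached block, e.g.:
+ *
+ *   WITH_RCU_READ_LOCK_GUARD() {
+ *       blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ *       ...
+ *   }
+ */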
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
base += DIRTY_MEMORY_BLOCK_SIZE;
}
- rcu_read_unlock();
-
return dirty;
}
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- rcu_read_lock();
+ RCU_READ_LOCK_GUARD();
blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]);
-
- rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ }
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
- }
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
- idx = page / DIRTY_MEMORY_BLOCK_SIZE;
- offset = page % DIRTY_MEMORY_BLOCK_SIZE;
- base = page - offset;
- while (page < end) {
- unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, next - page);
+ }
- if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
- offset, next - page);
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
}
- if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
- offset, next - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
- bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
- offset, next - page);
- }
-
- page = next;
- idx++;
- offset = 0;
- base += DIRTY_MEMORY_BLOCK_SIZE;
}
- rcu_read_unlock();
-
xen_hvm_modified_memory(start, length);
}
hwaddr addr;
ram_addr_t ram_addr;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
+ unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
DIRTY_MEMORY_BLOCK_SIZE);
- rcu_read_lock();
+ WITH_RCU_READ_LOCK_GUARD() {
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
- for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
- blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
- }
+ for (k = 0; k < nr; k++) {
+ if (bitmap[k]) {
+ unsigned long temp = leul_to_cpu(bitmap[k]);
- for (k = 0; k < nr; k++) {
- if (bitmap[k]) {
- unsigned long temp = leul_to_cpu(bitmap[k]);
+ atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
- atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
- atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
- if (tcg_enabled()) {
- atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
+ if (global_dirty_log) {
+ atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
+ }
+
+ if (tcg_enabled()) {
+ atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+ temp);
+ }
}
- }
- if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
- offset = 0;
- idx++;
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
}
- rcu_read_unlock();
-
xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+
+ if (!global_dirty_log) {
+ clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
+ }
+
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
unsigned client);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
- (ram_addr_t start, ram_addr_t length, unsigned client);
+ (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
}
+/* Called within an RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t start,
ram_addr_t length,
uint64_t *real_dirty_pages)
{
ram_addr_t addr;
- unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+ unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
uint64_t num_dirty = 0;
unsigned long *dest = rb->bmap;
- /* start address is aligned at the start of a word? */
- if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+ /* start address and length are aligned at the start of a word? */
+ if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
+ (start + rb->offset) &&
+ !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long * const *src;
- unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
+ unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
DIRTY_MEMORY_BLOCK_SIZE);
-
- rcu_read_lock();
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
src = atomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
}
}
- rcu_read_unlock();
+ if (rb->clear_bmap) {
+ /*
+ * Postpone the dirty bitmap clear to the point right before we
+ * really send the pages; the clearing is also split into
+ * smaller chunks.
+ */
+ clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+ length >> TARGET_PAGE_BITS);
+ } else {
+ /* Slow path: still clear the dirty bitmap in one huge chunk */
+ memory_region_clear_dirty_bitmap(rb->mr, start, length);
+ }
} else {
+ ram_addr_t offset = rb->offset;
+
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
- start + addr,
+ start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
*real_dirty_pages += 1;