/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)
-#define GLOBAL_DIRTY_MASK (0x3)
+/* Dirty tracking enabled because dirty limit */
+#define GLOBAL_DIRTY_LIMIT (1U << 2)
+
+#define GLOBAL_DIRTY_MASK (0x7)
extern unsigned int global_dirty_tracking;
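/*
 * Illustrative sketch (not part of this patch): how a dirty-limit user
 * might turn global dirty tracking on and off with its own reason bit.
 * It assumes memory_global_dirty_log_start()/_stop() take the
 * GLOBAL_DIRTY_* flags, as they do at the time of this change; the
 * wrapper function itself is hypothetical.
 */
static void dirty_limit_tracking_example(bool enable)
{
    if (enable) {
        memory_global_dirty_log_start(GLOBAL_DIRTY_LIMIT);
    } else {
        memory_global_dirty_log_stop(GLOBAL_DIRTY_LIMIT);
    }
}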
/*
* Bitmap for different IOMMUNotifier capabilities. Each notifier can
* register with one or multiple IOMMU Notifier capability bit(s).
+ *
+ * Normally there are two use cases for the notifiers:
+ *
+ * (1) When the device needs accurate synchronization of the vIOMMU page
+ *     tables, it needs to register for both MAP and UNMAP notifications
+ *     (the combination is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
+ *
+ *     Accurate synchronization means the notified device maintains a
+ *     shadow page table and must be notified on each guest MAP (page
+ *     table entry creation) and UNMAP (invalidation) event (e.g. VFIO).
+ *     Both notifications must be accurate so that the shadow page table
+ *     stays fully in sync with the guest view.
+ *
+ * (2) When the device doesn't need accurate synchronization of the
+ *     vIOMMU page tables, it needs to register only for UNMAP or
+ *     DEVIOTLB_UNMAP notifications.
+ *
+ *     This is the case when the device maintains a cache of IOMMU
+ *     translations (an IOTLB) and is able to fill that cache by
+ *     requesting translations from the vIOMMU through a protocol
+ *     similar to ATS (Address Translation Service).
+ *
+ *     Note that in this mode the vIOMMU will not maintain a shadowed
+ *     page table for the address space, and the UNMAP messages can
+ *     cover more than the pages that were previously mapped.  The IOMMU
+ *     notifiee must be able to handle such over-sized invalidations.
*/
typedef enum {
IOMMU_NOTIFIER_NONE = 0,
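/*
 * Illustrative sketch (not part of this patch): registering for use case
 * (1) above, i.e. full MAP/UNMAP synchronization as a VFIO-like consumer
 * would.  The callback body and the helper names are hypothetical; the
 * init/register calls are the ones declared further down in this header.
 */
static void my_shadow_notify(IOMMUNotifier *n, IOMMUTLBEntry *data)
{
    /* Update the shadow page table from @data (a MAP or UNMAP event). */
}

static void register_shadow_notifier(IOMMUMemoryRegion *iommu_mr,
                                     IOMMUNotifier *n, Error **errp)
{
    /* IOMMU_NOTIFIER_IOTLB_EVENTS == IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP */
    iommu_notifier_init(n, my_shadow_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
                        0, HWADDR_MAX, 0 /* iommu_idx */);
    memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), n, errp);
}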
* A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
* regions are currently populated to be used/accessed by the VM, notifying
* after parts were discarded (freeing up memory) and before parts will be
- * populated (consuming memory), to be used/acessed by the VM.
+ * populated (consuming memory), to be used/accessed by the VM.
*
* A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
* #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is
* Listeners are called in multiples of the minimum granularity (unless it
* would exceed the registered range) and changes are aligned to the minimum
* granularity within the #MemoryRegion. Listeners have to prepare for memory
- * becomming discarded in a different granularity than it was populated and the
+ * becoming discarded in a different granularity than it was populated and the
* other way around.
*/
struct RamDiscardManagerClass {
void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
RamDiscardListener *rdl);
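/*
 * Illustrative sketch (not part of this patch): a VFIO/vhost-like consumer
 * hooking into the RamDiscardManager of the section it maps.  Callback and
 * helper names here are hypothetical; the init/register calls follow the
 * declarations in this header, but treat the exact signatures as assumptions.
 */
static int populate_cb(RamDiscardListener *rdl, MemoryRegionSection *section)
{
    /* Map/pin the about-to-be-populated range; return 0 on success. */
    return 0;
}

static void discard_cb(RamDiscardListener *rdl, MemoryRegionSection *section)
{
    /* Unmap/unpin the just-discarded range. */
}

static void hook_ram_discard_manager(MemoryRegionSection *section,
                                     RamDiscardListener *rdl)
{
    RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);

    if (!rdm) {
        return; /* plain RAM: everything is always populated */
    }
    ram_discard_listener_init(rdl, populate_cb, discard_cb,
                              false /* double_discard_supported */);
    ram_discard_manager_register_listener(rdm, rdl, section);
}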
+bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+ ram_addr_t *ram_addr, bool *read_only,
+ bool *mr_has_discard_manager);
+
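/*
 * Illustrative sketch (not part of this patch): typical use of
 * memory_get_xlat_addr() from an IOMMU notifier, turning an IOTLB entry
 * into a host pointer before mapping it into a device.  The DMA-map step
 * is a hypothetical placeholder.
 */
static void map_iotlb_entry_example(IOMMUTLBEntry *iotlb)
{
    void *vaddr;
    ram_addr_t ram_addr;
    bool read_only, has_discard_manager;

    if (!memory_get_xlat_addr(iotlb, &vaddr, &ram_addr, &read_only,
                              &has_discard_manager)) {
        return; /* not backed by RAM, or otherwise not mappable */
    }
    /* e.g. dma_map(iotlb->iova, vaddr, iotlb->addr_mask + 1, read_only); */
}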
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
bool is_iommu;
RAMBlock *ram_block;
Object *owner;
+ /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
+ DeviceState *dev;
const MemoryRegionOps *ops;
void *opaque;
MemoryRegion *container;
+ int mapped_via_alias; /* Mapped via an alias, container might be NULL */
Int128 size;
hwaddr addr;
void (*destructor)(MemoryRegion *mr);
unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds;
RamDiscardManager *rdm; /* Only for RAM */
+
+ /* For devices designed to perform re-entrant IO into their own IO MRs */
+ bool disable_reentrancy_guard;
};
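/*
 * Illustrative sketch (not part of this patch): a device model that by
 * design triggers re-entrant IO into its own MMIO region opting out of
 * the re-entrancy guard.  The helper and region name are hypothetical;
 * memory_region_init_io() is the standard initializer.
 */
static void init_reentrant_mmio(MemoryRegion *iomem, Object *owner,
                                const MemoryRegionOps *ops, void *opaque)
{
    memory_region_init_io(iomem, owner, ops, opaque, "my-dev-mmio", 0x1000);
    /* This device re-enters its own MR by design; skip the guard. */
    iomem->disable_reentrancy_guard = true;
}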
struct IOMMUMemoryRegion {
* its @log_sync must be NULL. Vice versa.
*
* @listener: The #MemoryListener.
+ * @last_stage: whether this is the final stage of the dirty log sync
+ *     during migration.  The caller must guarantee that a sync with
+ *     @last_stage set to true is triggered only once, after all VCPUs
+ *     have been stopped.
*/
- void (*log_sync_global)(MemoryListener *listener);
+ void (*log_sync_global)(MemoryListener *listener, bool last_stage);
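    /*
     * Illustrative sketch (not part of this patch): a listener that uses
     * the global hook instead of per-region @log_sync might look like:
     *
     *     static void my_log_sync_global(MemoryListener *l, bool last_stage)
     *     {
     *         ... collect dirty bits from the backend; when last_stage is
     *             true all vCPUs are stopped, so one final exhaustive
     *             collection is safe ...
     *     }
     *
     *     static MemoryListener my_listener = {
     *         .log_sync_global = my_log_sync_global,
     *     };
     *
     * my_log_sync_global() and my_listener are hypothetical names.
     */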
/**
* @log_clear:
Error **errp);
/**
- * memory_region_init_resizeable_ram: Initialize memory region with resizeable
+ * memory_region_init_resizeable_ram: Initialize memory region with resizable
* RAM. Accesses into the region will
* modify memory directly. Only an initial
* portion of this RAM is actually used.
* @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
* RAM_NORESERVE,
* @path: the path in which to allocate the RAM.
+ * @offset: offset within the file referenced by path
* @readonly: true to open @path for reading, false for read/write.
* @errp: pointer to Error*, to store an error if it happens.
*
uint64_t align,
uint32_t ram_flags,
const char *path,
+ ram_addr_t offset,
bool readonly,
Error **errp);
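/*
 * Illustrative sketch (not part of this patch): mapping a slice of a
 * backing file via the new @offset parameter.  The path and sizes are
 * hypothetical; RAM_SHARED, GiB and &error_fatal are standard QEMU
 * symbols (GiB from "qemu/units.h", error_fatal from "qapi/error.h").
 */
static void init_backing_slice_example(MemoryRegion *mr, Object *owner)
{
    /* Back a 1 GiB region with the file contents starting at offset 2 GiB. */
    memory_region_init_ram_from_file(mr, owner, "backing-slice", 1 * GiB,
                                     0 /* align: default */, RAM_SHARED,
                                     "/dev/shm/guest-mem", 2 * GiB,
                                     false /* readonly */, &error_fatal);
}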
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
IOMMUTLBEvent *event);
+/**
+ * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
+ * translation that covers the
+ * range of a notifier
+ *
+ * @notifier: the notifier to be notified
+ */
+void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
+
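/*
 * Illustrative sketch (not part of this patch): on a vIOMMU-wide
 * invalidation or reset, tell every registered notifier to drop whatever
 * it has mapped for its range.  IOMMU_NOTIFIER_FOREACH() is the existing
 * iteration macro in this header; the reset hook itself is hypothetical.
 */
static void my_viommu_drop_mappings(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *n;

    IOMMU_NOTIFIER_FOREACH(n, iommu_mr) {
        memory_region_unmap_iommu_notifier_range(n);
    }
}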
/**
* memory_region_register_iommu_notifier: register a notifier for changes to
* IOMMU translation entries.
* querying the same page multiple times, which is especially useful for
* display updates where the scanlines often are not page aligned.
*
- * The dirty bitmap region which gets copyed into the snapshot (and
+ * The dirty bitmap region which gets copied into the snapshot (and
* cleared afterwards) can be larger than requested. The boundaries
* are rounded up/down so complete bitmap longs (covering 64 pages on
* 64bit hosts) can be copied over into the bitmap snapshot. Which
/**
* memory_region_is_mapped: returns true if #MemoryRegion is mapped
- * into any address space.
+ * into another memory region, which does not necessarily imply that it is
+ * mapped into an address space.
*
* @mr: a #MemoryRegion which should be checked if it's mapped
*/
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
*
* Synchronizes the dirty page log for all address spaces.
+ *
+ * @last_stage: whether this is the last stage of live migration
*/
-void memory_global_dirty_log_sync(void);
+void memory_global_dirty_log_sync(bool last_stage);
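/*
 * Illustrative sketch (not part of this patch): how a migration-style
 * caller distinguishes iterative syncs from the final one.  The wrapper
 * function is hypothetical.
 */
static void sync_dirty_log_example(bool completing)
{
    /*
     * During the iterative phase vCPUs keep running, so pass false; only
     * the final sync after the VM has been stopped may pass true, and
     * only once.
     */
    memory_global_dirty_log_sync(completing);
}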
/**
* memory_global_dirty_log_sync: synchronize the dirty log for all memory
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
+
/**
* memory_region_dispatch_read: perform a read directly to the specified
* MemoryRegion.
hwaddr addr, const void *buf,
hwaddr len);
+int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
+bool prepare_mmio_access(MemoryRegion *mr);
+
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
if (is_write) {
}
}
+/**
+ * address_space_set: Fill address space with a constant byte.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (e.g. unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @c: constant byte to fill the memory
+ * @len: the number of bytes to fill with the constant byte
+ * @attrs: memory transaction attributes
+ */
+MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
+ uint8_t c, hwaddr len, MemTxAttrs attrs);
+
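/*
 * Illustrative sketch (not part of this patch): zero-filling a page of
 * guest memory through the memory API.  address_space_memory (from
 * "exec/address-spaces.h"), MEMTXATTRS_UNSPECIFIED and MEMTX_OK are
 * standard QEMU symbols; the helper itself is hypothetical.
 */
static bool zero_guest_page_example(hwaddr gpa)
{
    MemTxResult res = address_space_set(&address_space_memory, gpa,
                                        0 /* byte */, 4096,
                                        MEMTXATTRS_UNSPECIFIED);
    return res == MEMTX_OK;
}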
#ifdef NEED_CPU_H
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/* Swap if non-host endianness or native (target) endianness */
return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else