/* Notify for address space range start <= addr <= end */
hwaddr start;
hwaddr end;
+ int iommu_idx;
QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC (1 << 0)
+
+/* RAM is mmap-ed with MAP_SHARED */
+#define RAM_SHARED (1 << 1)
+
+/* Only a portion of RAM (used_length) is actually used and migrated.
+ * This used_length can change across reboots.
+ */
+#define RAM_RESIZEABLE (1 << 2)
+
+/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
+ * zero the page and wake waiting processes.
+ * (Set during postcopy)
+ */
+#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
+
+/* RAM is a persistent kind of memory */
+#define RAM_PMEM (1 << 5)
+
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
IOMMUNotifierFlag flags,
- hwaddr start, hwaddr end)
+ hwaddr start, hwaddr end,
+ int iommu_idx)
{
n->notify = fn;
n->notifier_flags = flags;
n->start = start;
n->end = end;
+ n->iommu_idx = iommu_idx;
}
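/* Illustrative sketch (not part of this patch): registering a notifier for
 * unmap events on IOMMU index 0 across the whole address range. The
 * "example_" names are hypothetical.
 */
static void example_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* entry->perm is IOMMU_NONE here, since only unmap events were requested */
}

static void example_register(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    iommu_notifier_init(n, example_unmap_notify, IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX, 0 /* iommu_idx */);
    memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), n);
}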
/*
uint64_t data,
unsigned size,
MemTxAttrs attrs);
- /* Instruction execution pre-callback:
- * @addr is the address of the access relative to the @mr.
- * @size is the size of the area returned by the callback.
- * @offset is the location of the pointer inside @mr.
- *
- * Returns a pointer to a location which contains guest code.
- */
- void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
- unsigned *offset);
enum device_endian endianness;
/* Guest-visible constraints: */
* as a machine check exception).
*/
bool (*accepts)(void *opaque, hwaddr addr,
- unsigned size, bool is_write);
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
} valid;
/* Internal implementation constraints: */
struct {
*/
bool unaligned;
} impl;
+};
- /* If .read and .write are not present, old_mmio may be used for
- * backwards compatibility with old mmio registration
- */
- const MemoryRegionMmio old_mmio;
+enum IOMMUMemoryRegionAttr {
+ IOMMU_ATTR_SPAPR_TCE_FD
};
+/**
+ * IOMMUMemoryRegionClass:
+ *
+ * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
+ * and provide an implementation of at least the @translate method here
+ * to handle requests to the memory region. Other methods are optional.
+ *
+ * The IOMMU implementation must use the IOMMU notifier infrastructure
+ * to report whenever mappings are changed, by calling
+ * memory_region_notify_iommu() (or, if necessary, by calling
+ * memory_region_notify_one() for each registered notifier).
+ *
+ * Conceptually an IOMMU provides a mapping from input address
+ * to an output TLB entry. If the IOMMU is aware of memory transaction
+ * attributes and the output TLB entry depends on the transaction
+ * attributes, we represent this using IOMMU indexes. Each index
+ * selects a particular translation table that the IOMMU has:
+ * @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ * @translate takes an input address and an IOMMU index
+ * and the mapping returned can only depend on the input address and the
+ * IOMMU index.
+ *
+ * Most IOMMUs don't care about the transaction attributes and support
+ * only a single IOMMU index. A more complex IOMMU might have one index
+ * for secure transactions and one for non-secure transactions.
+ */
typedef struct IOMMUMemoryRegionClass {
/* private */
struct DeviceClass parent_class;
/*
- * Return a TLB entry that contains a given address. Flag should
- * be the access permission of this translation operation. We can
- * set flag to IOMMU_NONE to mean that we don't need any
- * read/write permission checks, like, when for region replay.
+ * Return a TLB entry that contains a given address.
+ *
+ * The IOMMUAccessFlags indicated via @flag are optional and may
+ * be specified as IOMMU_NONE to indicate that the caller needs
+ * the full translation information for both reads and writes. If
+ * the access flags are specified then the IOMMU implementation
+ * may use this as an optimization, to stop doing a page table
+ * walk as soon as it knows that the requested permissions are not
+ * allowed. If IOMMU_NONE is passed then the IOMMU must do the
+ * full page table walk and report the permissions in the returned
+ * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
+ * return different mappings for reads and writes.)
+ *
+ * The returned information remains valid while the caller is
+ * holding the big QEMU lock or is inside an RCU critical section;
+ * if the caller wishes to cache the mapping beyond that it must
+ * register an IOMMU notifier so it can invalidate its cached
+ * information when the IOMMU mapping changes.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @addr: address to be translated within the memory region
+ * @flag: requested access permissions
+ * @iommu_idx: IOMMU index for the translation
*/
IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
- IOMMUAccessFlags flag);
- /* Returns minimum supported page size */
+ IOMMUAccessFlags flag, int iommu_idx);
+ /* Returns minimum supported page size in bytes.
+ * If this method is not provided then the minimum is assumed to
+ * be TARGET_PAGE_SIZE.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
- /* Called when IOMMU Notifier flag changed */
+ /* Called when the IOMMU Notifier flags change (i.e. when the set of
+ * events which IOMMU users are requesting notification for changes).
+ * Optional method -- need not be provided if the IOMMU does not
+ * need to know exactly which events must be notified.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @old_flags: events which previously needed to be notified
+ * @new_flags: events which now need to be notified
+ */
void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
IOMMUNotifierFlag old_flags,
IOMMUNotifierFlag new_flags);
- /* Set this up to provide customized IOMMU replay function */
+ /* Called to handle memory_region_iommu_replay().
+ *
+ * The default implementation of memory_region_iommu_replay() is to
+ * call the IOMMU translate method for every page in the address space
+ * with flag == IOMMU_NONE and then call the notifier if translate
+ * returns a valid mapping. If this method is implemented then it
+ * overrides the default behaviour, and must provide the full semantics
+ * of memory_region_iommu_replay(), by calling @notifier for every
+ * translation present in the IOMMU.
+ *
+ * Optional method -- an IOMMU only needs to provide this method
+ * if the default is inefficient or produces undesirable side effects.
+ *
+ * Note: this is not related to record-and-replay functionality.
+ */
void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
+
+ /* Get IOMMU misc attributes. This is an optional method that
+ * can be used to allow users of the IOMMU to get implementation-specific
+ * information. The IOMMU implements this method to handle calls
+ * by IOMMU users to memory_region_iommu_get_attr() by filling in
+ * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
+ * the IOMMU supports. If the method is unimplemented then
+ * memory_region_iommu_get_attr() will always return -EINVAL.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @attr: attribute being queried
+ * @data: memory to fill in with the attribute data
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL for unrecognized or unimplemented attribute types.
+ */
+ int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
+ void *data);
+
+ /* Return the IOMMU index to use for a given set of transaction attributes.
+ *
+ * Optional method: if an IOMMU only supports a single IOMMU index then
+ * the default implementation of memory_region_iommu_attrs_to_index()
+ * will return 0.
+ *
+ * The indexes supported by an IOMMU must be contiguous, starting at 0.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @attrs: memory transaction attributes
+ */
+ int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
+
+ /* Return the number of IOMMU indexes this IOMMU supports.
+ *
+ * Optional method: if this method is not provided, then
+ * memory_region_iommu_num_indexes() will return 1, indicating that
+ * only a single IOMMU index is supported.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
+ int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;
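/* Illustrative sketch (not part of this patch): the minimal shape of an
 * IOMMU implementation with one secure and one non-secure translation
 * table. All "example_" names are hypothetical.
 */
static IOMMUTLBEntry example_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                       IOMMUAccessFlags flag, int iommu_idx)
{
    /* A real IOMMU would walk the page table selected by @iommu_idx;
     * this sketch just returns an identity mapping for a 4K page.
     */
    return (IOMMUTLBEntry) {
        .target_as = &address_space_memory,
        .iova = addr & ~(hwaddr)0xfff,
        .translated_addr = addr & ~(hwaddr)0xfff,
        .addr_mask = 0xfff,
        .perm = IOMMU_RW,
    };
}

static int example_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    return attrs.secure ? 1 : 0;   /* indexes are contiguous, starting at 0 */
}

static int example_num_indexes(IOMMUMemoryRegion *iommu)
{
    return 2;
}

static void example_iommu_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = example_translate;
    imrc->attrs_to_index = example_attrs_to_index;
    imrc->num_indexes = example_num_indexes;
}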
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
bool ram;
bool subpage;
bool readonly; /* For RAM regions */
+ bool nonvolatile;
bool rom_device;
bool flush_coalesced_mmio;
bool global_locking;
MemoryRegion *alias;
hwaddr alias_offset;
int32_t priority;
- QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+ QTAILQ_HEAD(, MemoryRegion) subregions;
QTAILQ_ENTRY(MemoryRegion) subregions_link;
- QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+ QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
const char *name;
unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds;
bool match_data, uint64_t data, EventNotifier *e);
void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
bool match_data, uint64_t data, EventNotifier *e);
- void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
hwaddr addr, hwaddr len);
- void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
hwaddr addr, hwaddr len);
/* Lower = earlier (during add), later (during del) */
unsigned priority;
int ioeventfd_nb;
struct MemoryRegionIoeventfd *ioeventfds;
- QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
+ QTAILQ_HEAD(, MemoryListener) listeners;
QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};
-FlatView *address_space_to_flatview(AddressSpace *as);
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+typedef struct FlatRange FlatRange;
+
+/* Flattened global view of current active memory hierarchy. Kept in sorted
+ * order.
+ */
+struct FlatView {
+ struct rcu_head rcu;
+ unsigned ref;
+ FlatRange *ranges;
+ unsigned nr;
+ unsigned nr_allocated;
+ struct AddressSpaceDispatch *dispatch;
+ MemoryRegion *root;
+};
+
+static inline FlatView *address_space_to_flatview(AddressSpace *as)
+{
+ return atomic_rcu_read(&as->current_map);
+}
+
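/* Illustrative sketch (not part of this patch): the returned FlatView is
 * only guaranteed stable inside an RCU critical section (or while holding
 * the BQL), so callers bracket their use of it:
 */
static void example_with_flatview(AddressSpace *as)
{
    rcu_read_lock();
    FlatView *fv = address_space_to_flatview(as);
    /* ... translate or read through fv ... */
    rcu_read_unlock();
}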
/**
* MemoryRegionSection: describes a fragment of a #MemoryRegion
*
* @mr: the region, or %NULL if empty
- * @address_space: the address space the region is mapped in
+ * @fv: the flat view of the address space the region is mapped in
* @offset_within_region: the beginning of the section, relative to @mr's start
* @size: the size of the section; will not exceed @mr's boundaries
* @offset_within_address_space: the address of the first byte of the section
* relative to the region's address space
* @readonly: writes to this section are ignored
+ * @nonvolatile: this section is non-volatile
*/
struct MemoryRegionSection {
MemoryRegion *mr;
Int128 size;
hwaddr offset_within_address_space;
bool readonly;
+ bool nonvolatile;
};
/**
uint64_t size,
Error **errp);
+/**
+ * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region.
+ * Accesses into the region will
+ * modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ * must be unique within any device
+ * @size: size of the region.
+ * @share: allow remapping RAM to different addresses
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function is similar to memory_region_init_ram_nomigrate.
+ * The only difference is that part of the RAM region can be remapped.
+ */
+void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ Error **errp);
+
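/* Illustrative sketch (not part of this patch): the region name and size
 * here are hypothetical, and &error_fatal comes from qapi/error.h.
 */
static void example_init_shared_ram(MemoryRegion *mr, Object *owner)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, "example-ram",
                                            64 * 1024, true /* share */,
                                            &error_fatal);
}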
/**
* memory_region_init_resizeable_ram: Initialize memory region with resizeable
* RAM. Accesses into the region will
uint64_t length,
void *host),
Error **errp);
-#ifdef __linux__
+#ifdef CONFIG_POSIX
+
/**
* memory_region_init_ram_from_file: Initialize RAM memory region with a
* mmap-ed backend.
* @name: Region name, becomes part of RAMBlock name used in migration stream;
* must be unique within any device
* @size: size of the region.
- * @share: %true if memory must be mmaped with the MAP_SHARED flag
+ * @align: alignment of the region base address; if 0, the default alignment
+ * (getpagesize()) will be used.
+ * @ram_flags: Memory region features:
+ * - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
+ * - RAM_PMEM: the memory is persistent memory
+ * Other bits are currently ignored.
* @path: the path in which to allocate the RAM.
* @errp: pointer to Error*, to store an error if it happens.
*
struct Object *owner,
const char *name,
uint64_t size,
- bool share,
+ uint64_t align,
+ uint32_t ram_flags,
const char *path,
Error **errp);
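/* Illustrative sketch (not part of this patch): mapping a file on a DAX
 * filesystem as shared persistent memory; the path and region name are
 * hypothetical.
 */
static void example_init_pmem(MemoryRegion *mr, Object *owner, uint64_t size)
{
    memory_region_init_ram_from_file(mr, owner, "example-pmem", size,
                                     0 /* default alignment */,
                                     RAM_SHARED | RAM_PMEM,
                                     "/mnt/pmem0/backing", &error_fatal);
}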
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
* @ops: callbacks for write access handling (must not be NULL).
+ * @opaque: passed to the read and write callbacks of the @ops structure.
* @name: Region name, becomes part of RAMBlock name used in migration stream;
* must be unique within any device
* @size: size of the region.
uint64_t size,
Error **errp);
-/**
- * memory_region_init_reservation: Initialize a memory region that reserves
- * I/O space.
- *
- * A reservation region primariy serves debugging purposes. It claims I/O
- * space that is not supposed to be handled by QEMU itself. Any access via
- * the memory API will cause an abort().
- * This function is deprecated. Use memory_region_init_io() with NULL
- * callbacks instead.
- *
- * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-static inline void memory_region_init_reservation(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size)
-{
- memory_region_init_io(mr, owner, NULL, mr, name, size);
-}
-
/**
* memory_region_init_iommu: Initialize a memory region of a custom type
* that translates addresses
* An IOMMU region translates addresses and forwards accesses to a target
* memory region.
*
- * @typename: QOM class name
+ * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
+ * @_iommu_mr should be a pointer to enough memory for an instance of
+ * that subclass, @instance_size is the size of that subclass, and
+ * @mrtypename is its name. This function will initialize @_iommu_mr as an
+ * instance of the subclass, and its methods will then be called to handle
+ * accesses to the memory region. See the documentation of
+ * #IOMMUMemoryRegionClass for further details.
+ *
* @_iommu_mr: the #IOMMUMemoryRegion to be initialized
* @instance_size: the IOMMUMemoryRegion subclass instance size
+ * @mrtypename: the type name of the #IOMMUMemoryRegion
* @owner: the object that tracks the region's reference count
- * @ops: a function that translates addresses into the @target region
* @name: used for debugging; not visible to the user or ABI
* @size: size of the region.
*/
/**
* memory_region_is_ram: check whether a memory region is random access
*
- * Returns %true is a memory region is random access.
+ * Returns %true if a memory region is random access.
*
* @mr: the memory region being queried
*/
/**
* memory_region_is_ram_device: check whether a memory region is a ram device
*
- * Returns %true is a memory region is a device backed ram region
+ * Returns %true if a memory region is a device backed ram region
*
* @mr: the memory region being queried
*/
* memory_region_get_iommu_class_nocheck: returns iommu memory region class
* if an iommu or NULL if not
*
- * Returns pointer to IOMMUMemoryRegioniClass if a memory region is an iommu,
- * otherwise NULL. This is fast path avoinding QOM checking, use with caution.
+ * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
+ * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
*
* @mr: the memory region being queried
*/
* should be notified with an UNMAP followed by a MAP.
*
* @iommu_mr: the memory region that was changed
+ * @iommu_idx: the IOMMU index for the translation table which has changed
* @entry: the new entry in the IOMMU translation table. The entry
* replaces all old entries for the same virtual I/O address range.
* Deleted entries have .@perm == 0.
*/
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
IOMMUTLBEntry entry);
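/* Illustrative sketch (not part of this patch): an IOMMU implementation
 * reporting that a 4K page was removed from translation table 0. Deleted
 * entries carry .perm == IOMMU_NONE, as documented above.
 */
static void example_invalidate_page(IOMMUMemoryRegion *iommu_mr, hwaddr iova)
{
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = iova & ~(hwaddr)0xfff,
        .translated_addr = 0,
        .addr_mask = 0xfff,
        .perm = IOMMU_NONE,
    };

    memory_region_notify_iommu(iommu_mr, 0 /* iommu_idx */, entry);
}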
/**
* a notifier with the minimum page granularity returned by
* mr->iommu_ops->get_page_size().
*
+ * Note: this is not related to record-and-replay functionality.
+ *
* @iommu_mr: the memory region to observe
* @n: the notifier to which to replay iommu mappings
*/
* memory_region_iommu_replay_all: replay existing IOMMU translations
* to all the notifiers registered.
*
+ * Note: this is not related to record-and-replay functionality.
+ *
* @iommu_mr: the memory region to observe
*/
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n);
+/**
+ * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
+ * defined on the IOMMU.
+ *
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that the IOMMU does not support the requested
+ * attribute.
+ *
+ * @iommu_mr: the memory region
+ * @attr: the requested attribute
+ * @data: a pointer to the requested attribute data
+ */
+int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
+ enum IOMMUMemoryRegionAttr attr,
+ void *data);
+
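/* Illustrative sketch (not part of this patch): fetching the sPAPR TCE fd,
 * the only attribute defined so far. IOMMUs without a get_attr()
 * implementation make this fail with -EINVAL.
 */
static int example_get_tce_fd(IOMMUMemoryRegion *iommu_mr)
{
    int fd = -1;

    if (memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
                                     &fd) < 0) {
        return -1; /* attribute not supported */
    }
    return fd;
}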
+/**
+ * memory_region_iommu_attrs_to_index: return the IOMMU index to
+ * use for translations with the given memory transaction attributes.
+ *
+ * @iommu_mr: the memory region
+ * @attrs: the memory transaction attributes
+ */
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs);
+
+/**
+ * memory_region_iommu_num_indexes: return the total number of IOMMU
+ * indexes that this IOMMU supports.
+ *
+ * @iommu_mr: the memory region
+ */
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
+
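/* Illustrative sketch (not part of this patch): a caller that wants to see
 * events from every translation table registers one notifier per IOMMU
 * index. @notifiers is a hypothetical caller-owned array with at least
 * memory_region_iommu_num_indexes() entries.
 */
static void example_watch_all_indexes(IOMMUMemoryRegion *iommu_mr,
                                      IOMMUNotifier *notifiers,
                                      IOMMUNotify fn)
{
    int i, n = memory_region_iommu_num_indexes(iommu_mr);

    for (i = 0; i < n; i++) {
        iommu_notifier_init(&notifiers[i], fn, IOMMU_NOTIFIER_ALL,
                            0, HWADDR_MAX, i);
        memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
                                              &notifiers[i]);
    }
}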
/**
* memory_region_name: get a memory region's name
*
/**
* memory_region_is_rom: check whether a memory region is ROM
*
- * Returns %true is a memory region is read-only memory.
+ * Returns %true if a memory region is read-only memory.
*
* @mr: the memory region being queried
*/
return mr->ram && mr->readonly;
}
+/**
+ * memory_region_is_nonvolatile: check whether a memory region is non-volatile
+ *
+ * Returns %true if a memory region is non-volatile memory.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
+{
+ return mr->nonvolatile;
+}
/**
* memory_region_get_fd: Get a file descriptor backing a RAM memory region.
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
- * @mr: the memory region being queried.
+ * @ptr: the host pointer to be converted
+ * @offset: the offset within the memory region
*/
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
*/
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
-/**
- * memory_region_get_dirty: Check whether a range of bytes is dirty
- * for a specified client.
- *
- * Checks whether a range of bytes has been written to since the last
- * call to memory_region_reset_dirty() with the same @client. Dirty logging
- * must be enabled.
- *
- * @mr: the memory region being queried.
- * @addr: the address (relative to the start of the region) being queried.
- * @size: the size of the range being queried.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client);
-
/**
* memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
*
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size);
-/**
- * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
- * for a specified client. It clears them.
- *
- * Checks whether a range of bytes has been written to since the last
- * call to memory_region_reset_dirty() with the same @client. Dirty logging
- * must be enabled.
- *
- * @mr: the memory region being queried.
- * @addr: the address (relative to the start of the region) being queried.
- * @size: the size of the range being queried.
- * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
- * %DIRTY_MEMORY_VGA.
- */
-bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
- hwaddr size, unsigned client);
-
/**
* memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
* bitmap and clear it.
*
* Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
* returns the snapshot. The snapshot can then be used to query dirty
- * status, using memory_region_snapshot_get_dirty. Unlike
- * memory_region_test_and_clear_dirty this allows to query the same
- * page multiple times, which is especially useful for display updates
- * where the scanlines often are not page aligned.
+ * status, using memory_region_snapshot_get_dirty. Snapshotting allows
+ * querying the same page multiple times, which is especially useful for
+ * display updates where the scanlines often are not page aligned.
*
* The dirty bitmap region which gets copied into the snapshot (and
* cleared afterwards) can be larger than requested. The boundaries
DirtyBitmapSnapshot *snap,
hwaddr addr, hwaddr size);
-/**
- * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
- * any external TLBs (e.g. kvm)
- *
- * Flushes dirty information from accelerators such as kvm and vhost-net
- * and makes it available to users of the memory API.
- *
- * @mr: the region being flushed.
- */
-void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
-
/**
* memory_region_reset_dirty: Mark a range of pages as clean, for a specified
* client.
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client);
+/**
+ * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
+ * TBs (for self-modifying code).
+ *
+ * The MemoryRegionOps->write() callback of a ROM device must use this function
+ * to mark byte ranges that have been modified internally, such as by directly
+ * accessing the memory returned by memory_region_get_ram_ptr().
+ *
+ * This function marks the range dirty and invalidates TBs so that TCG can
+ * detect self-modifying code.
+ *
+ * @mr: the region being flushed.
+ * @addr: the start, relative to the start of the region, of the range being
+ * flushed.
+ * @size: the size, in bytes, of the range being flushed.
+ */
+void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
+
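/* Illustrative sketch (not part of this patch): a ROM device write callback
 * (e.g. a NOR-flash program command) modifying its backing RAM directly and
 * then flushing the range; "opaque" is assumed to be the rom_device region.
 */
static void example_flash_write(void *opaque, hwaddr addr, uint64_t value,
                                unsigned size)
{
    MemoryRegion *mr = opaque;
    uint8_t *p = memory_region_get_ram_ptr(mr);

    p[addr] &= value;                 /* programming can only clear bits */
    memory_region_flush_rom_device(mr, addr, 1);
}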
/**
* memory_region_set_readonly: Turn a memory region read-only (or read-write)
*
*/
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+/**
+ * memory_region_set_nonvolatile: Turn a memory region non-volatile
+ *
+ * Allows a memory region to be marked as non-volatile;
+ * this is only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @nonvolatile: whether the region is to be non-volatile.
+ */
+void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
+
/**
* memory_region_rom_device_set_romd: enable/disable ROMD mode
*
*/
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
-/**
- * memory_region_set_global_locking: Declares the access processing requires
- * QEMU's global lock.
- *
- * When this is invoked, accesses to the memory region will be processed while
- * holding the global lock of QEMU. This is the default behavior of memory
- * regions.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_set_global_locking(MemoryRegion *mr);
-
/**
* memory_region_clear_global_locking: Declares that access processing does
* not depend on the QEMU global lock.
* @size: the size of the access to trigger the eventfd
* @match_data: whether to match against @data, instead of just @addr
* @data: the data to match against the guest write
- * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
**/
void memory_region_add_eventfd(MemoryRegion *mr,
hwaddr addr,
* @size: the size of the access to trigger the eventfd
* @match_data: whether to match against @data, instead of just @addr
* @data: the data to match against the guest write
- * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
*/
void memory_region_del_eventfd(MemoryRegion *mr,
hwaddr addr,
*/
void memory_global_dirty_log_stop(void);
-void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
- bool dispatch_tree);
-
-/**
- * memory_region_request_mmio_ptr: request a pointer to an mmio
- * MemoryRegion. If it is possible map a RAM MemoryRegion with this pointer.
- * When the device wants to invalidate the pointer it will call
- * memory_region_invalidate_mmio_ptr.
- *
- * @mr: #MemoryRegion to check
- * @addr: address within that region
- *
- * Returns true on success, false otherwise.
- */
-bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
-
-/**
- * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio
- * previously requested.
- * In the end that means that if something wants to execute from this area it
- * will need to request the pointer again.
- *
- * @mr: #MemoryRegion associated to the pointer.
- * @addr: address within that region
- * @size: size of that area.
- */
-void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
- unsigned size);
+void mtree_info(bool flatview, bool dispatch_tree, bool owner);
/**
* memory_region_dispatch_read: perform a read directly to the specified
*/
void address_space_destroy(AddressSpace *as);
+/**
+ * address_space_remove_listeners: unregister all listeners of an address space
+ *
+ * Removes all callbacks previously registered with memory_listener_register()
+ * for @as.
+ *
+ * @as: an initialized #AddressSpace
+ */
+void address_space_remove_listeners(AddressSpace *as);
+
/**
* address_space_rw: read from or write to an address space.
*
* @addr: address within that address space
* @attrs: memory transaction attributes
* @buf: buffer with the data transferred
+ * @len: the number of bytes to read or write
* @is_write: indicates the transfer direction
*/
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs, uint8_t *buf,
- int len, bool is_write);
+ hwaddr len, bool is_write);
/**
* address_space_write: write to address space.
* @addr: address within that address space
* @attrs: memory transaction attributes
* @buf: buffer with the data transferred
+ * @len: the number of bytes to write
*/
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
- const uint8_t *buf, int len);
+ const uint8_t *buf, hwaddr len);
+
+/**
+ * address_space_write_rom: write to address space, including ROM.
+ *
+ * This function writes to the specified address space, but will
+ * write data to both ROM and RAM. This is used for non-guest
+ * writes like writes from the gdb debug stub or initial loading
+ * of ROM contents.
+ *
+ * Note that portions of the write which attempt to write data to
+ * a device will be silently ignored -- only real RAM and ROM will
+ * be written to.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ * @len: the number of bytes to write
+ */
+MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs,
+ const uint8_t *buf, hwaddr len);
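/* Illustrative sketch (not part of this patch): a loader placing a firmware
 * blob, reaching ROM that a plain address_space_write() would skip.
 */
static MemTxResult example_load_blob(hwaddr base, const uint8_t *blob,
                                     hwaddr len)
{
    return address_space_write_rom(&address_space_memory, base,
                                   MEMTXATTRS_UNSPECIFIED, blob, len);
}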
/* address_space_ld*: load from an address space
* address_space_st*: store to an address space
* @result: location to write the success/failure of the transaction;
* if NULL, this information is discarded
*/
-uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-
-uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
-uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
-uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
-uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
-uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
-void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
-void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst.inc.h"
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst_phys.inc.h"
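/* With SUFFIX empty and ARG1 "as", the two includes above expand to the
 * prototypes this header used to spell out by hand, e.g.:
 *
 *   uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
 *                                 MemTxAttrs attrs, MemTxResult *result);
 *   void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
 *                             MemTxAttrs attrs, MemTxResult *result);
 *   uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
 *   void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
 */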
struct MemoryRegionCache {
+ void *ptr;
hwaddr xlat;
hwaddr len;
- AddressSpace *as;
+ FlatView *fv;
+ MemoryRegionSection mrs;
+ bool is_write;
};
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
+
+
+/* address_space_ld*_cached: load from a cached #MemoryRegion
+ * address_space_st*_cached: store into a cached #MemoryRegion
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address. The address is
+ * a physical address in the AddressSpace, but it must lie within
+ * a #MemoryRegion that was mapped with address_space_cache_init.
+ *
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @cache: previously initialized #MemoryRegionCache to be accessed
+ * @addr: address within the address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+
+#define SUFFIX _cached_slow
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst.inc.h"
+
+/* Inline fast path for direct RAM access. */
+static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ return ldub_p(cache->ptr + addr);
+ } else {
+ return address_space_ldub_cached_slow(cache, addr, attrs, result);
+ }
+}
+
+static inline void address_space_stb_cached(MemoryRegionCache *cache,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ stb_p(cache->ptr + addr, val);
+ } else {
+ address_space_stb_cached_slow(cache, addr, val, attrs, result);
+ }
+}
+
+#define ENDIANNESS _le
+#include "exec/memory_ldst_cached.inc.h"
+
+#define ENDIANNESS _be
+#include "exec/memory_ldst_cached.inc.h"
+
+#define SUFFIX _cached
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst_phys.inc.h"
/* address_space_cache_init: prepare for repeated access to a physical
* memory region
*/
void address_space_cache_destroy(MemoryRegionCache *cache);
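/* Illustrative sketch (not part of this patch): the usual cache lifecycle --
 * initialize once, perform repeated accesses (taking the inline fast path
 * whenever the cached region is RAM), then destroy.
 */
static uint32_t example_read_field(AddressSpace *as, hwaddr base)
{
    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    uint32_t val = 0;

    if (address_space_cache_init(&cache, as, base, 16, false) >= 0) {
        val = address_space_ldl_le_cached(&cache, 0,
                                          MEMTXATTRS_UNSPECIFIED, NULL);
        address_space_cache_destroy(&cache);
    }
    return val;
}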
-/* address_space_ld*_cached: load from a cached #MemoryRegion
- * address_space_st*_cached: store into a cached #MemoryRegion
- *
- * These functions perform a load or store of the byte, word,
- * longword or quad to the specified address. The address is
- * a physical address in the AddressSpace, but it must lie within
- * a #MemoryRegion that was mapped with address_space_cache_init.
- *
- * The _le suffixed functions treat the data as little endian;
- * _be indicates big endian; no suffix indicates "same endianness
- * as guest CPU".
- *
- * The "guest CPU endianness" accessors are deprecated for use outside
- * target-* code; devices should be CPU-agnostic and use either the LE
- * or the BE accessors.
- *
- * @cache: previously initialized #MemoryRegionCache to be accessed
- * @addr: address within the address space
- * @val: data value, for stores
- * @attrs: memory transaction attributes
- * @result: location to write the success/failure of the transaction;
- * if NULL, this information is discarded
- */
-uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-
-uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
-void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
/* address_space_get_iotlb_entry: translate an address into an IOTLB
* entry. Should be called from an RCU critical section.
*/
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
- bool is_write);
+ bool is_write, MemTxAttrs attrs);
/* address_space_translate: translate an address range into an address space
* into a MemoryRegion and an address range into that section. Should be
* called from an RCU critical section, to avoid that the last reference
* to the returned region disappears after address_space_translate returns.
*
- * @as: #AddressSpace to be accessed
+ * @fv: #FlatView to be accessed
* @addr: address within that address space
* @xlat: pointer to address within the returned memory region section's
* #MemoryRegion.
* @len: pointer to length
* @is_write: indicates the transfer direction
+ * @attrs: memory attributes
*/
MemoryRegion *flatview_translate(FlatView *fv,
hwaddr addr, hwaddr *xlat,
- hwaddr *len, bool is_write);
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs);
static inline MemoryRegion *address_space_translate(AddressSpace *as,
hwaddr addr, hwaddr *xlat,
- hwaddr *len, bool is_write)
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs)
{
return flatview_translate(address_space_to_flatview(as),
- addr, xlat, len, is_write);
+ addr, xlat, len, is_write, attrs);
}
/* address_space_access_valid: check for validity of accessing an address
* @addr: address within that address space
* @len: length of the area to be checked
* @is_write: indicates the transfer direction
+ * @attrs: memory attributes
*/
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
+ bool is_write, MemTxAttrs attrs);
/* address_space_map: map a physical memory region into a host virtual address
*
* @addr: address within that address space
* @plen: pointer to length of buffer; updated on return
* @is_write: indicates the transfer direction
+ * @attrs: memory attributes
*/
void *address_space_map(AddressSpace *as, hwaddr addr,
- hwaddr *plen, bool is_write);
+ hwaddr *plen, bool is_write, MemTxAttrs attrs);
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
*
* the amount of memory that was actually read or written by the caller.
*
* @as: #AddressSpace used
- * @addr: address within that address space
+ * @buffer: host pointer as returned by address_space_map()
* @len: buffer length as returned by address_space_map()
* @access_len: amount of data actually transferred
* @is_write: indicates the transfer direction
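/* Illustrative sketch (not part of this patch): the map/unmap pairing.
 * The mapped length can come back shorter than requested, so use the
 * updated *plen.
 */
static void example_zero_range(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, true,
                                MEMTXATTRS_UNSPECIFIED);

    if (p) {
        memset(p, 0, plen);
        address_space_unmap(as, p, plen, true, plen);
    }
}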
/* Internal functions, part of the implementation of address_space_read. */
+MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
MemTxAttrs attrs, uint8_t *buf,
- int len, hwaddr addr1, hwaddr l,
+ hwaddr len, hwaddr addr1, hwaddr l,
MemoryRegion *mr);
-
-MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf, int len);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
+/* Internal functions, part of the implementation of address_space_read_cached
+ * and address_space_write_cached. */
+void address_space_read_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, void *buf, hwaddr len);
+void address_space_write_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, const void *buf, hwaddr len);
+
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
if (is_write) {
*
* Return a MemTxResult indicating whether the operation succeeded
* or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
+ * IOMMU fault). Called within RCU critical section.
*
* @as: #AddressSpace to be accessed
* @addr: address within that address space
* @buf: buffer with the data transferred
*/
static inline __attribute__((__always_inline__))
-MemTxResult flatview_read(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, int len)
+MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ hwaddr len)
{
MemTxResult result = MEMTX_OK;
hwaddr l, addr1;
void *ptr;
MemoryRegion *mr;
+ FlatView *fv;
if (__builtin_constant_p(len)) {
if (len) {
rcu_read_lock();
+ fv = address_space_to_flatview(as);
l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, false);
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
if (len == l && memory_access_is_direct(mr, false)) {
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
memcpy(buf, ptr, len);
rcu_read_unlock();
}
} else {
- result = flatview_read_full(fv, addr, attrs, buf, len);
+ result = address_space_read_full(as, addr, attrs, buf, len);
}
return result;
}
-static inline MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, uint8_t *buf,
- int len)
-{
- return flatview_read(address_space_to_flatview(as), addr, attrs, buf, len);
-}
-
/**
* address_space_read_cached: read from a cached RAM region
*
*/
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
- void *buf, int len)
+ void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
- address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
+ if (likely(cache->ptr)) {
+ memcpy(buf, cache->ptr + addr, len);
+ } else {
+ address_space_read_cached_slow(cache, addr, buf, len);
+ }
}
/**
*/
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
- void *buf, int len)
+ void *buf, hwaddr len)
{
assert(addr < cache->len && len <= cache->len - addr);
- address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
+ if (likely(cache->ptr)) {
+ memcpy(cache->ptr + addr, buf, len);
+ } else {
+ address_space_write_cached_slow(cache, addr, buf, len);
+ }
}
#endif