//#define DEBUG_UNASSIGNED
-#define RAM_ADDR_INVALID (~(ram_addr_t)0)
-
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
- MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \
- .mr = (fr)->mr, \
- .address_space = (as), \
- .offset_within_region = (fr)->offset_in_region, \
- .size = (fr)->addr.size, \
- .offset_within_address_space = int128_get64((fr)->addr.start), \
- .readonly = (fr)->readonly, \
- }), ##_args)
+ do { \
+ MemoryRegionSection mrs = section_from_flat_range(fr, as); \
+ MEMORY_LISTENER_CALL(callback, dir, &mrs, ##_args); \
+ } while(0)
struct CoalescedMemoryRange {
AddrRange addr;
struct FlatRange {
 MemoryRegion *mr;
 hwaddr offset_in_region;
AddrRange addr;
uint8_t dirty_log_mask;
+ bool romd_mode;
bool readonly;
};
#define FOR_EACH_FLAT_RANGE(var, view) \
for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
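+/* Build a MemoryRegionSection describing the FlatRange @fr as seen in @as. */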
+static inline MemoryRegionSection
+section_from_flat_range(FlatRange *fr, AddressSpace *as)
+{
+ return (MemoryRegionSection) {
+ .mr = fr->mr,
+ .address_space = as,
+ .offset_within_region = fr->offset_in_region,
+ .size = fr->addr.size,
+ .offset_within_address_space = int128_get64(fr->addr.start),
+ .readonly = fr->readonly,
+ };
+}
+
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
return a->mr == b->mr
&& addrrange_equal(a->addr, b->addr)
&& a->offset_in_region == b->offset_in_region
+ && a->romd_mode == b->romd_mode
&& a->readonly == b->readonly;
}
r1->addr.size),
int128_make64(r2->offset_in_region))
&& r1->dirty_log_mask == r2->dirty_log_mask
+ && r1->romd_mode == r2->romd_mode
&& r1->readonly == r2->readonly;
}
fr.mr = mr;
fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
+ fr.romd_mode = mr->romd_mode;
fr.readonly = readonly;
/* Render the region itself into any gaps left by the current view. */
static void memory_region_destructor_ram(MemoryRegion *mr)
{
 qemu_ram_free(mr->ram_block);
}
-static void memory_region_destructor_rom_device(MemoryRegion *mr)
-{
- qemu_ram_free(mr->ram_block);
-}
-
static bool memory_region_need_escape(char c)
{
return c == '/' || c == '[' || c == '\\' || c == ']';
mr->alias_offset = offset;
}
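+/* Read-only RAM region: unlike a ROM device, there is no callback to run
+ * when the guest writes to it. */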
+void memory_region_init_rom(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp)
+{
+ memory_region_init(mr, owner, name, size);
+ mr->ram = true;
+ mr->readonly = true;
+ mr->terminates = true;
+ mr->destructor = memory_region_destructor_ram;
+ mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+}
+
void memory_region_init_rom_device(MemoryRegion *mr,
Object *owner,
 const MemoryRegionOps *ops,
 void *opaque,
 const char *name,
 uint64_t size,
Error **errp)
{
+ assert(ops);
memory_region_init(mr, owner, name, size);
mr->ops = ops;
mr->opaque = opaque;
mr->terminates = true;
mr->rom_device = true;
- mr->destructor = memory_region_destructor_rom_device;
+ mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc(size, mr, errp);
}
memory_region_init(mr, owner, name, size);
mr->iommu_ops = ops,
mr->terminates = true; /* then re-forwards */
- notifier_list_init(&mr->iommu_notify);
+ QLIST_INIT(&mr->iommu_notify);
+ mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
 return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
-void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
+static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
- notifier_list_add(&mr->iommu_notify, n);
+ IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
+ IOMMUNotifier *iommu_notifier;
+
+ QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+ flags |= iommu_notifier->notifier_flags;
+ }
+
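+ /* Only tell the IOMMU implementation when the aggregate flags change. */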
+ if (flags != mr->iommu_notify_flags &&
+ mr->iommu_ops->notify_flag_changed) {
+ mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
+ flags);
+ }
+
+ mr->iommu_notify_flags = flags;
}
-void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
- hwaddr granularity, bool is_write)
+void memory_region_register_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n)
{
- hwaddr addr;
+ /* We need to register for at least one bitfield */
+ assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
+ QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
+ memory_region_update_iommu_notify_flags(mr);
+}
+
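+/* Minimum page size supported by the IOMMU; TARGET_PAGE_SIZE if the
+ * implementation does not report one. */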
+uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
+{
+ assert(memory_region_is_iommu(mr));
+ if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
+ return mr->iommu_ops->get_min_page_size(mr);
+ }
+ return TARGET_PAGE_SIZE;
+}
+
+void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
+ bool is_write)
+{
+ hwaddr addr, granularity;
IOMMUTLBEntry iotlb;
+ granularity = memory_region_iommu_get_min_page_size(mr);
+
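+ /* Walk the region at the minimum page granularity and replay every
+ * existing mapping to @n. */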
for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
iotlb = mr->iommu_ops->translate(mr, addr, is_write);
if (iotlb.perm != IOMMU_NONE) {
}
}
-void memory_region_unregister_iommu_notifier(Notifier *n)
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
+ IOMMUNotifier *n)
{
- notifier_remove(n);
+ QLIST_REMOVE(n, node);
+ memory_region_update_iommu_notify_flags(mr);
}
void memory_region_notify_iommu(MemoryRegion *mr,
IOMMUTLBEntry entry)
{
+ IOMMUNotifier *iommu_notifier;
+ IOMMUNotifierFlag request_flags;
+
assert(memory_region_is_iommu(mr));
- notifier_list_notify(&mr->iommu_notify, &entry);
+
+ if (entry.perm & IOMMU_RW) {
+ request_flags = IOMMU_NOTIFIER_MAP;
+ } else {
+ request_flags = IOMMU_NOTIFIER_UNMAP;
+ }
+
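+ /* Only notifiers that registered for this kind of event get called. */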
+ QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
+ if (iommu_notifier->notifier_flags & request_flags) {
+ iommu_notifier->notify(iommu_notifier, &entry);
+ }
+ }
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
int memory_region_get_fd(MemoryRegion *mr)
{
- if (mr->alias) {
- return memory_region_get_fd(mr->alias);
- }
- assert(mr->ram_block);
- return qemu_get_ram_fd(memory_region_get_ram_addr(mr));
+ int fd;
+
+ rcu_read_lock();
+ while (mr->alias) {
+ mr = mr->alias;
+ }
+ fd = mr->ram_block->fd;
+ rcu_read_unlock();
+
+ return fd;
+}
+
+void memory_region_set_fd(MemoryRegion *mr, int fd)
+{
+ rcu_read_lock();
+ while (mr->alias) {
+ mr = mr->alias;
+ }
+ mr->ram_block->fd = fd;
+ rcu_read_unlock();
}
void *memory_region_get_ram_ptr(MemoryRegion *mr)
mr = mr->alias;
}
assert(mr->ram_block);
- ptr = qemu_get_ram_ptr(mr->ram_block, memory_region_get_ram_addr(mr));
+ ptr = qemu_map_ram_ptr(mr->ram_block, offset);
rcu_read_unlock();
- return ptr + offset;
+ return ptr;
+}
+
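+/* Return the MemoryRegion whose RAM block contains the host pointer @ptr,
+ * or NULL; *offset is set to the offset of @ptr within that block. */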
+MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
+{
+ RAMBlock *block;
+
+ block = qemu_ram_block_from_host(ptr, false, offset);
+ if (!block) {
+ return NULL;
+ }
+
+ return block->mr;
}
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
return mr && mr != container;
}
-void address_space_sync_dirty_bitmap(AddressSpace *as)
+void memory_global_dirty_log_sync(void)
{
+ MemoryListener *listener;
+ AddressSpace *as;
FlatView *view;
FlatRange *fr;
- view = address_space_get_flatview(as);
- FOR_EACH_FLAT_RANGE(fr, view) {
- MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
+ QTAILQ_FOREACH(listener, &memory_listeners, link) {
+ if (!listener->log_sync) {
+ continue;
+ }
+ /* Global listeners are being phased out. */
+ assert(listener->address_space_filter);
+ as = listener->address_space_filter;
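+ /* Sync dirty memory for every FlatRange in this listener's address space. */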
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ MemoryRegionSection mrs = section_from_flat_range(fr, as);
+ listener->log_sync(listener, &mrs);
+ }
+ flatview_unref(view);
}
- flatview_unref(view);
}
void memory_global_dirty_log_start(void)