X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=exec.c;h=bca441f7fd358bfbf003b01f2095db512be3de4f;hb=800d4deda04be016a95fbbf397c830a2d14ff9f6;hp=c930040f839c74d2ffbff46aeedc86e37e1319ac;hpb=a028edeaa6f1c154f06e16440e46b0f876a64077;p=mirror_qemu.git

diff --git a/exec.c b/exec.c
index c930040f83..bca441f7fd 100644
--- a/exec.c
+++ b/exec.c
@@ -50,7 +50,7 @@
 #include "sysemu/hw_accel.h"
 #include "exec/address-spaces.h"
 #include "sysemu/xen-mapcache.h"
-#include "trace-root.h"
+#include "trace/trace-root.h"
 
 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
 #include <linux/falloc.h>
@@ -77,6 +77,10 @@
 
 #include "monitor/monitor.h"
 
+#ifdef CONFIG_LIBDAXCTL
+#include <daxctl/libdaxctl.h>
+#endif
+
 //#define DEBUG_SUBPAGE
 
 #if !defined(CONFIG_USER_ONLY)
@@ -94,20 +98,10 @@ AddressSpace address_space_memory;
 static MemoryRegion io_mem_unassigned;
 #endif
 
-CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
-
-/* current CPU in the current thread. It is only valid inside
-   cpu_exec() */
-__thread CPUState *current_cpu;
-
 uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;
 
 #if !defined(CONFIG_USER_ONLY)
-/* 0 = Do not count executed instructions.
-   1 = Precise instruction counting.
-   2 = Adaptive rate instruction counting.  */
-int use_icount;
 
 typedef struct PhysPageEntry PhysPageEntry;
 
@@ -355,13 +349,13 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         hwaddr addr,
                                                         bool resolve_subpage)
 {
-    MemoryRegionSection *section = atomic_read(&d->mru_section);
+    MemoryRegionSection *section = qatomic_read(&d->mru_section);
     subpage_t *subpage;
 
     if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
         !section_covers_addr(section, addr)) {
         section = phys_page_find(d, addr);
-        atomic_set(&d->mru_section, section);
+        qatomic_set(&d->mru_section, section);
     }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
@@ -629,8 +623,7 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
      */
     MemoryRegion *mr = MEMORY_REGION(iommu_mr);
     TCGIOMMUNotifier *notifier;
-    Error *err = NULL;
-    int i, ret;
+    int i;
 
     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
@@ -659,12 +652,8 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
                            0,
                            HWADDR_MAX,
                            iommu_idx);
-        ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
-                                                    &err);
-        if (ret) {
-            error_report_err(err);
-            exit(1);
-        }
+        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+                                              &error_fatal);
     }
 
     if (!notifier->active) {
@@ -697,7 +686,8 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     IOMMUMemoryRegionClass *imrc;
     IOMMUTLBEntry iotlb;
     int iommu_idx;
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d =
+        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
 
     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -828,22 +818,6 @@ const VMStateDescription vmstate_cpu_common = {
     }
 };
 
-#endif
-
-CPUState *qemu_get_cpu(int index)
-{
-    CPUState *cpu;
-
-    CPU_FOREACH(cpu) {
-        if (cpu->cpu_index == index) {
-            return cpu;
-        }
-    }
-
-    return NULL;
-}
-
-#if !defined(CONFIG_USER_ONLY)
 void cpu_address_space_init(CPUState *cpu, int asidx,
                             const char *prefix, MemoryRegion *mr)
 {
@@ -892,6 +866,7 @@ void cpu_exec_unrealizefn(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
+    tlb_destroy(cpu);
     cpu_list_remove(cpu);
 
     if (cc->vmsd != NULL) {
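
Note on the tcg_register_iommu_notifier() hunk above: the ret/err/exit(1) plumbing collapses because passing &error_fatal makes the Error machinery itself abort on failure. A minimal sketch of that idiom, as a simplified stand-in for QEMU's real error.c (names and behavior here are illustrative only):

#include <stdio.h>
#include <stdlib.h>

typedef struct Error { const char *msg; } Error;

/* QEMU exposes a designated sink object; a callee that sees its
 * address knows the caller wants abort-on-error semantics. */
Error *error_fatal;

static void error_setg(Error **errp, const char *msg)
{
    if (errp == &error_fatal) {
        fprintf(stderr, "fatal: %s\n", msg);  /* report and die */
        exit(1);
    }
    if (errp) {
        static Error e;
        e.msg = msg;
        *errp = &e;                           /* normal propagation */
    }
}

static void register_notifier(Error **errp)
{
    error_setg(errp, "iommu notifier registration failed");
}

int main(void)
{
    register_notifier(&error_fatal);          /* exits inside the callee */
    return 0;
}
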
@@ -916,6 +891,7 @@ Property cpu_common_props[] = {
     DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                      MemoryRegion *),
 #endif
+    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -946,7 +922,9 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
 
     qemu_plugin_vcpu_init_hook(cpu);
 
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+    assert(cc->vmsd == NULL);
+#else /* !CONFIG_USER_ONLY */
     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
         vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
     }
@@ -1036,6 +1014,7 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                           int flags, CPUWatchpoint **watchpoint)
 {
     CPUWatchpoint *wp;
+    vaddr in_page;
 
     /* forbid ranges which are empty or run off the end of the address space */
     if (len == 0 || (addr + len - 1) < addr) {
@@ -1056,7 +1035,12 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
         QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
     }
 
-    tlb_flush_page(cpu, addr);
+    in_page = -(addr | TARGET_PAGE_MASK);
+    if (len <= in_page) {
+        tlb_flush_page(cpu, addr);
+    } else {
+        tlb_flush(cpu);
+    }
 
     if (watchpoint)
         *watchpoint = wp;
@@ -1127,7 +1111,7 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
     int ret = 0;
 
     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
-        if (watchpoint_address_matches(wp, addr, TARGET_PAGE_SIZE)) {
+        if (watchpoint_address_matches(wp, addr, len)) {
            ret |= wp->flags;
        }
    }
@@ -1255,7 +1239,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 {
     RAMBlock *block;
 
-    block = atomic_rcu_read(&ram_list.mru_block);
+    block = qatomic_rcu_read(&ram_list.mru_block);
     if (block && addr - block->offset < block->max_length) {
         return block;
     }
@@ -1281,7 +1265,7 @@ found:
      *     call_rcu(reclaim_ramblock, xxx);
      *     rcu_read_unlock()
      *
-     * atomic_rcu_set is not needed here. The block was already published
+     * qatomic_rcu_set is not needed here. The block was already published
      * when it was placed into the list. Here we're just making an extra
      * copy of the pointer.
      */
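
Note on the cpu_watchpoint_insert() hunk: -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the end of its page, so flushing a single TLB page suffices only when the watched range fits there. A quick standalone check of the identity, assuming a fixed 4 KiB page (QEMU's TARGET_PAGE_BITS is per-target):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)

int main(void)
{
    uint64_t addr = 0x1ff8;
    uint64_t in_page = -(addr | TARGET_PAGE_MASK);

    /* 0x1ff8 is 8 bytes short of the 0x2000 page boundary */
    assert(in_page == 8);
    printf("bytes left in page: %" PRIu64 "\n", in_page);

    /* len <= in_page: tlb_flush_page(cpu, addr) covers the range;
     * otherwise the range crosses a page and tlb_flush() is needed. */
    return 0;
}
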
@@ -1315,7 +1299,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                               unsigned client)
 {
     DirtyMemoryBlocks *blocks;
-    unsigned long end, page;
+    unsigned long end, page, start_page;
     bool dirty = false;
     RAMBlock *ramblock;
     uint64_t mr_offset, mr_size;
@@ -1325,10 +1309,11 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
     }
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
-    page = start >> TARGET_PAGE_BITS;
+    start_page = start >> TARGET_PAGE_BITS;
+    page = start_page;
 
     WITH_RCU_READ_LOCK_GUARD() {
-        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
         ramblock = qemu_get_ram_block(start);
         /* Range sanity check on the ramblock */
         assert(start >= ramblock->offset &&
@@ -1345,8 +1330,8 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
             page += num;
         }
 
-        mr_offset = (ram_addr_t)(page << TARGET_PAGE_BITS) - ramblock->offset;
-        mr_size = (end - page) << TARGET_PAGE_BITS;
+        mr_offset = (ram_addr_t)(start_page << TARGET_PAGE_BITS) - ramblock->offset;
+        mr_size = (end - start_page) << TARGET_PAGE_BITS;
         memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
     }
 
@@ -1378,7 +1363,7 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     dest = 0;
 
     WITH_RCU_READ_LOCK_GUARD() {
-        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
 
         while (page < end) {
             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
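
Note on the cpu_physical_memory_test_and_clear_dirty() hunk: the loop advances 'page' up to 'end', so the old code derived the range to clear from an exhausted cursor and asked memory_region_clear_dirty_bitmap() to clear zero bytes. Saving start_page restores the intended range; the arithmetic, with made-up page numbers:

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12

int main(void)
{
    uint64_t start_page = 16, end = 24;      /* pages 16..23 */
    uint64_t page = start_page;

    while (page < end) {
        page += 4;                           /* the loop consumes the cursor */
    }

    /* old: range derived from the exhausted cursor is empty */
    assert(((end - page) << TARGET_PAGE_BITS) == 0);
    /* new: range derived from the saved start covers all 8 pages */
    assert(((end - start_page) << TARGET_PAGE_BITS) == (8u << TARGET_PAGE_BITS));
    return 0;
}
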
@@ -1668,59 +1653,18 @@ static int find_max_backend_pagesize(Object *obj, void *opaque)
 long qemu_minrampagesize(void)
 {
     long hpsize = LONG_MAX;
-    long mainrampagesize;
-    Object *memdev_root;
-    MachineState *ms = MACHINE(qdev_get_machine());
-
-    mainrampagesize = qemu_mempath_getpagesize(mem_path);
-
-    /* it's possible we have memory-backend objects with
-     * hugepage-backed RAM. these may get mapped into system
-     * address space via -numa parameters or memory hotplug
-     * hooks. we want to take these into account, but we
-     * also want to make sure these supported hugepage
-     * sizes are applicable across the entire range of memory
-     * we may boot from, so we take the min across all
-     * backends, and assume normal pages in cases where a
-     * backend isn't backed by hugepages.
-     */
-    memdev_root = object_resolve_path("/objects", NULL);
-    if (memdev_root) {
-        object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
-    }
-    if (hpsize == LONG_MAX) {
-        /* No additional memory regions found ==> Report main RAM page size */
-        return mainrampagesize;
-    }
-
-    /* If NUMA is disabled or the NUMA nodes are not backed with a
-     * memory-backend, then there is at least one node using "normal" RAM,
-     * so if its page size is smaller we have got to report that size instead.
-     */
-    if (hpsize > mainrampagesize &&
-        (ms->numa_state == NULL ||
-         ms->numa_state->num_nodes == 0 ||
-         ms->numa_state->nodes[0].node_memdev == NULL)) {
-        static bool warned;
-        if (!warned) {
-            error_report("Huge page support disabled (n/a for main memory).");
-            warned = true;
-        }
-        return mainrampagesize;
-    }
+    Object *memdev_root = object_resolve_path("/objects", NULL);
 
+    object_child_foreach(memdev_root, find_min_backend_pagesize, &hpsize);
     return hpsize;
 }
 
 long qemu_maxrampagesize(void)
 {
-    long pagesize = qemu_mempath_getpagesize(mem_path);
+    long pagesize = 0;
     Object *memdev_root = object_resolve_path("/objects", NULL);
 
-    if (memdev_root) {
-        object_child_foreach(memdev_root, find_max_backend_pagesize,
-                             &pagesize);
-    }
+    object_child_foreach(memdev_root, find_max_backend_pagesize, &pagesize);
     return pagesize;
 }
 #else
@@ -1776,6 +1720,46 @@ static int64_t get_file_size(int fd)
     return size;
 }
 
+static int64_t get_file_align(int fd)
+{
+    int64_t align = -1;
+#if defined(__linux__) && defined(CONFIG_LIBDAXCTL)
+    struct stat st;
+
+    if (fstat(fd, &st) < 0) {
+        return -errno;
+    }
+
+    /* Special handling for devdax character devices */
+    if (S_ISCHR(st.st_mode)) {
+        g_autofree char *path = NULL;
+        g_autofree char *rpath = NULL;
+        struct daxctl_ctx *ctx;
+        struct daxctl_region *region;
+        int rc = 0;
+
+        path = g_strdup_printf("/sys/dev/char/%d:%d",
+                    major(st.st_rdev), minor(st.st_rdev));
+        rpath = realpath(path, NULL);
+
+        rc = daxctl_new(&ctx);
+        if (rc) {
+            return -1;
+        }
+
+        daxctl_region_foreach(ctx, region) {
+            if (strstr(rpath, daxctl_region_get_path(region))) {
+                align = daxctl_region_get_align(region);
+                break;
+            }
+        }
+        daxctl_unref(ctx);
+    }
+#endif /* defined(__linux__) && defined(CONFIG_LIBDAXCTL) */
+
+    return align;
+}
+
 static int file_ram_open(const char *path,
                          const char *region_name,
                          bool *created,
@@ -1843,8 +1827,6 @@ static void *file_ram_alloc(RAMBlock *block,
                             bool truncate,
                             Error **errp)
 {
-    Error *err = NULL;
-    MachineState *ms = MACHINE(qdev_get_machine());
     void *area;
 
     block->page_size = qemu_fd_getpagesize(fd);
@@ -1900,15 +1882,6 @@ static void *file_ram_alloc(RAMBlock *block,
         return NULL;
     }
 
-    if (mem_prealloc) {
-        os_mem_prealloc(fd, area, memory, ms->smp.cpus, &err);
-        if (err) {
-            error_propagate(errp, err);
-            qemu_ram_munmap(fd, area, memory);
-            return NULL;
-        }
-    }
-
     block->fd = fd;
     return area;
 }
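
Note on get_file_align(): a devdax character device is resolved to its owning region by turning the chardev's major:minor into a /sys/dev/char path and matching its realpath against the region path reported by libdaxctl. The sysfs step can be tried standalone, without libdaxctl; a sketch with error handling reduced and /dev/dax naming assumed:

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
    struct stat st;

    if (argc < 2 || stat(argv[1], &st) < 0 || !S_ISCHR(st.st_mode)) {
        fprintf(stderr, "usage: %s /dev/daxX.Y\n", argv[0]);
        return 1;
    }

    char path[64];
    snprintf(path, sizeof(path), "/sys/dev/char/%d:%d",
             major(st.st_rdev), minor(st.st_rdev));

    /* realpath() resolves the symlink into the device's sysfs node,
     * which carries the owning dax region's path as a prefix. */
    char *rpath = realpath(path, NULL);
    printf("%s -> %s\n", path, rpath ? rpath : "(unresolved)");
    free(rpath);
    return 0;
}
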
@@ -2125,11 +2098,23 @@ static int memory_try_enable_merging(void *addr, size_t len)
  */
 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
 {
+    const ram_addr_t unaligned_size = newsize;
+
     assert(block);
 
     newsize = HOST_PAGE_ALIGN(newsize);
 
     if (block->used_length == newsize) {
+        /*
+         * We don't have to resize the ram block (which only knows aligned
+         * sizes), however, we have to notify if the unaligned size changed.
+         */
+        if (unaligned_size != memory_region_size(block->mr)) {
+            memory_region_set_size(block->mr, unaligned_size);
+            if (block->resized) {
+                block->resized(block->idstr, unaligned_size, block->host);
+            }
+        }
         return 0;
     }
 
@@ -2153,9 +2138,9 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
     block->used_length = newsize;
     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                         DIRTY_CLIENTS_ALL);
-    memory_region_set_size(block->mr, newsize);
+    memory_region_set_size(block->mr, unaligned_size);
     if (block->resized) {
-        block->resized(block->idstr, newsize, block->host);
+        block->resized(block->idstr, unaligned_size, block->host);
     }
     return 0;
 }
@@ -2166,16 +2151,15 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
  * Otherwise no-op.
  * @Note: this is supposed to be a synchronous op.
  */
-void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length)
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
 {
-    void *addr = ramblock_ptr(block, start);
-
     /* The requested range should fit in within the block range */
     g_assert((start + length) <= block->used_length);
 
 #ifdef CONFIG_LIBPMEM
     /* The lack of support for pmem should not block the sync */
     if (ramblock_is_pmem(block)) {
+        void *addr = ramblock_ptr(block, start);
         pmem_persist(addr, length);
         return;
     }
@@ -2186,6 +2170,7 @@ void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length)
      * specified as persistent (or is not one) - use the msync.
      * Less optimal but still achieves the same goal
      */
+    void *addr = ramblock_ptr(block, start);
     if (qemu_msync(addr, length, block->fd)) {
         warn_report("%s: failed to sync memory range: start: "
                     RAM_ADDR_FMT " length: " RAM_ADDR_FMT,
@@ -2214,7 +2199,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
         DirtyMemoryBlocks *new_blocks;
         int j;
 
-        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
         new_blocks = g_malloc(sizeof(*new_blocks) +
                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
 
@@ -2227,7 +2212,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
         }
 
-        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
 
        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
@@ -2326,7 +2311,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
 {
     RAMBlock *new_block;
     Error *local_err = NULL;
-    int64_t file_size;
+    int64_t file_size, file_align;
 
     /* Just support these ram flags by now. */
     assert((ram_flags & ~(RAM_SHARED | RAM_PMEM)) == 0);
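
Note on qemu_ram_resize(): the block itself only grows in host-page-aligned steps, but the MemoryRegion (and the resized() callback) must see the exact byte size, including when the aligned size did not change at all -- that is what the new early-notify branch handles. The two sizes in isolation, assuming a 4 KiB host page:

#include <assert.h>
#include <stdint.h>

#define HOST_PAGE_SIZE 4096u
#define HOST_PAGE_ALIGN(x) \
    (((x) + HOST_PAGE_SIZE - 1) & ~(uint64_t)(HOST_PAGE_SIZE - 1))

int main(void)
{
    uint64_t unaligned_size = 5000;               /* requested bytes */
    uint64_t used_length = HOST_PAGE_ALIGN(unaligned_size);

    assert(used_length == 8192);                  /* block grows page-wise */

    /* Resizing to 5100 leaves used_length untouched, yet the
     * MemoryRegion size must still be updated to 5100. */
    uint64_t second = 5100;
    assert(HOST_PAGE_ALIGN(second) == used_length);
    return 0;
}
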
@@ -2356,9 +2341,17 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
     size = HOST_PAGE_ALIGN(size);
     file_size = get_file_size(fd);
     if (file_size > 0 && file_size < size) {
-        error_setg(errp, "backing store %s size 0x%" PRIx64
+        error_setg(errp, "backing store size 0x%" PRIx64
                    " does not match 'size' option 0x" RAM_ADDR_FMT,
-                   mem_path, file_size, size);
+                   file_size, size);
+        return NULL;
+    }
+
+    file_align = get_file_align(fd);
+    if (file_align > 0 && mr && file_align > mr->align) {
+        error_setg(errp, "backing store align 0x%" PRIx64
+                   " is larger than 'align' option 0x%" PRIx64,
+                   file_align, mr->align);
         return NULL;
     }
 
@@ -2666,7 +2659,7 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
     }
 
     RCU_READ_LOCK_GUARD();
-    block = atomic_rcu_read(&ram_list.mru_block);
+    block = qatomic_rcu_read(&ram_list.mru_block);
     if (block && block->host && host - block->host < block->max_length) {
         goto found;
     }
@@ -2750,6 +2743,14 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if (watchpoint_address_matches(wp, addr, len)
             && (wp->flags & flags)) {
+            if (replay_running_debug()) {
+                /*
+                 * Don't process the watchpoints when we are
+                 * in a reverse debugging operation.
+                 */
+                replay_breakpoint();
+                return;
+            }
             if (flags == BP_MEM_READ) {
                 wp->flags |= BP_WATCHPOINT_HIT_READ;
             } else {
@@ -2788,9 +2789,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
 }
 
 static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
-                                 MemTxAttrs attrs, uint8_t *buf, hwaddr len);
+                                 MemTxAttrs attrs, void *buf, hwaddr len);
 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
-                                  const uint8_t *buf, hwaddr len);
+                                  const void *buf, hwaddr len);
 static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len,
                                   bool is_write, MemTxAttrs attrs);
 
@@ -2911,7 +2912,7 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
+    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
     MemoryRegionSection *sections = d->map.sections;
 
     return &sections[index & ~TARGET_PAGE_MASK];
@@ -2995,7 +2996,7 @@ static void tcg_commit(MemoryListener *listener)
      * may have split the RCU critical section.
      */
     d = address_space_to_dispatch(cpuas->as);
-    atomic_rcu_set(&cpuas->memory_dispatch, d);
+    qatomic_rcu_set(&cpuas->memory_dispatch, d);
     tlb_flush(cpuas->cpu);
 }
 
@@ -3027,11 +3028,12 @@ MemoryRegion *get_system_io(void)
 
 /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)
 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
-                        uint8_t *buf, target_ulong len, int is_write)
+                        void *ptr, target_ulong len, bool is_write)
 {
     int flags;
     target_ulong l, page;
     void * p;
+    uint8_t *buf = ptr;
 
     while (len > 0) {
         page = addr & TARGET_PAGE_MASK;
@@ -3134,7 +3136,7 @@ static bool prepare_mmio_access(MemoryRegion *mr)
     bool unlocked = !qemu_mutex_iothread_locked();
     bool release_lock = false;
 
-    if (unlocked && mr->global_locking) {
+    if (unlocked) {
         qemu_mutex_lock_iothread();
         unlocked = false;
         release_lock = true;
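
Note on prepare_mmio_access(): with the mr->global_locking test gone, every MMIO access takes the big lock if the caller does not already hold it, and reports back whether it must be released. The take-if-unlocked/release-flag shape, sketched with plain pthreads rather than QEMU's iothread lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread bool big_lock_held;

static bool prepare_access(void)
{
    bool release_lock = false;

    if (!big_lock_held) {              /* caller came in unlocked */
        pthread_mutex_lock(&big_lock);
        big_lock_held = true;
        release_lock = true;           /* we took it, we must drop it */
    }
    return release_lock;
}

int main(void)
{
    bool release = prepare_access();
    printf("device access under the lock\n");
    if (release) {
        big_lock_held = false;
        pthread_mutex_unlock(&big_lock);
    }
    return 0;
}
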
@@ -3155,14 +3157,15 @@ static bool prepare_mmio_access(MemoryRegion *mr)
 /* Called within RCU critical section.  */
 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                            MemTxAttrs attrs,
-                                           const uint8_t *buf,
+                                           const void *ptr,
                                            hwaddr len, hwaddr addr1,
                                            hwaddr l, MemoryRegion *mr)
 {
-    uint8_t *ptr;
+    uint8_t *ram_ptr;
     uint64_t val;
     MemTxResult result = MEMTX_OK;
     bool release_lock = false;
+    const uint8_t *buf = ptr;
 
     for (;;) {
         if (!memory_access_is_direct(mr, true)) {
@@ -3175,8 +3178,8 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
                                   size_memop(l), attrs);
         } else {
             /* RAM case */
-            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
-            memcpy(ptr, buf, l);
+            ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
+            memcpy(ram_ptr, buf, l);
             invalidate_and_set_dirty(mr, addr1, l);
         }
 
@@ -3202,7 +3205,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
 
 /* Called from RCU critical section.  */
 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
-                                  const uint8_t *buf, hwaddr len)
+                                  const void *buf, hwaddr len)
 {
     hwaddr l;
     hwaddr addr1;
@@ -3219,14 +3222,15 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
 
 /* Called within RCU critical section.  */
 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
-                                   MemTxAttrs attrs, uint8_t *buf,
+                                   MemTxAttrs attrs, void *ptr,
                                    hwaddr len, hwaddr addr1, hwaddr l,
                                    MemoryRegion *mr)
 {
-    uint8_t *ptr;
+    uint8_t *ram_ptr;
     uint64_t val;
     MemTxResult result = MEMTX_OK;
     bool release_lock = false;
+    uint8_t *buf = ptr;
 
     for (;;) {
         if (!memory_access_is_direct(mr, false)) {
@@ -3238,8 +3242,8 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
             stn_he_p(buf, l, val);
         } else {
             /* RAM case */
-            ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
-            memcpy(buf, ptr, l);
+            ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
+            memcpy(buf, ram_ptr, l);
         }
 
         if (release_lock) {
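
Note on the buf parameter changes: switching the public parameter to void * removes casts at every call site, while the byte arithmetic moves behind a local uint8_t * (and the old 'ptr' local is renamed ram_ptr to free up the name). The same conversion on a toy copier:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* before: copy_out(uint8_t *buf, ...) forced casts at call sites */
static void copy_out(void *ptr, const void *src, size_t len)
{
    uint8_t *buf = ptr;                    /* byte stepping stays internal */
    const uint8_t *s = src;

    for (size_t off = 0; off < len; off++) {
        buf[off] = s[off];
    }
}

int main(void)
{
    uint32_t words[4] = { 1, 2, 3, 4 }, out[4];

    copy_out(out, words, sizeof(words));   /* no casts at the call site */
    assert(out[3] == 4);
    return 0;
}
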
@@ -3264,7 +3268,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
 }
 
 /* Called from RCU critical section.  */
 static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
-                                 MemTxAttrs attrs, uint8_t *buf, hwaddr len)
+                                 MemTxAttrs attrs, void *buf, hwaddr len)
 {
     hwaddr l;
     hwaddr addr1;
@@ -3277,7 +3281,7 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
 }
 
 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
-                                    MemTxAttrs attrs, uint8_t *buf, hwaddr len)
+                                    MemTxAttrs attrs, void *buf, hwaddr len)
 {
     MemTxResult result = MEMTX_OK;
     FlatView *fv;
@@ -3293,7 +3297,7 @@ MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
 
 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                 MemTxAttrs attrs,
-                                const uint8_t *buf, hwaddr len)
+                                const void *buf, hwaddr len)
 {
     MemTxResult result = MEMTX_OK;
     FlatView *fv;
@@ -3308,7 +3312,7 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
 }
 
 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
-                             uint8_t *buf, hwaddr len, bool is_write)
+                             void *buf, hwaddr len, bool is_write)
 {
     if (is_write) {
         return address_space_write(as, addr, attrs, buf, len);
@@ -3317,8 +3321,8 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
     }
 }
 
-void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
-                            hwaddr len, int is_write)
+void cpu_physical_memory_rw(hwaddr addr, void *buf,
+                            hwaddr len, bool is_write)
 {
     address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                      buf, len, is_write);
@@ -3332,14 +3336,15 @@ enum write_rom_type {
 static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
                                                            hwaddr addr,
                                                            MemTxAttrs attrs,
-                                                           const uint8_t *buf,
+                                                           const void *ptr,
                                                            hwaddr len,
                                                            enum write_rom_type type)
 {
     hwaddr l;
-    uint8_t *ptr;
+    uint8_t *ram_ptr;
     hwaddr addr1;
     MemoryRegion *mr;
+    const uint8_t *buf = ptr;
 
     RCU_READ_LOCK_GUARD();
     while (len > 0) {
@@ -3351,14 +3356,14 @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
             l = memory_access_size(mr, l, addr1);
         } else {
             /* ROM/RAM case */
-            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
+            ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
             switch (type) {
             case WRITE_DATA:
-                memcpy(ptr, buf, l);
+                memcpy(ram_ptr, buf, l);
                 invalidate_and_set_dirty(mr, addr1, l);
                 break;
             case FLUSH_CACHE:
-                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
+                flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
                 break;
             }
         }
@@ -3372,7 +3377,7 @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
 /* used for ROM loading : can write in RAM and ROM */
 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                     MemTxAttrs attrs,
-                                    const uint8_t *buf, hwaddr len)
+                                    const void *buf, hwaddr len)
 {
     return address_space_write_rom_internal(as, addr, attrs,
                                             buf, len, WRITE_DATA);
@@ -3438,7 +3443,7 @@ void cpu_register_map_client(QEMUBH *bh)
     qemu_mutex_lock(&map_client_list_lock);
     client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
-    if (!atomic_read(&bounce.in_use)) {
+    if (!qatomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
@@ -3572,7 +3577,8 @@ void *address_space_map(AddressSpace *as,
     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
 
     if (!memory_access_is_direct(mr, is_write)) {
-        if (atomic_xchg(&bounce.in_use, true)) {
+        if (qatomic_xchg(&bounce.in_use, true)) {
+            *plen = 0;
             return NULL;
         }
         /* Avoid unbounded allocations */
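
Note on the *plen = 0 addition in address_space_map(): returning NULL while leaving *plen untouched would let a caller that also consults the length read a stale value; zeroing it makes the bounce-buffer-busy failure unambiguous (the exact motivating caller is not shown in this diff). A toy version of that contract:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static bool bounce_in_use;
static char bounce_buffer[4096];

/* On contention the mapper must zero *plen as well as return NULL. */
static void *map_region(size_t *plen)
{
    if (bounce_in_use) {
        *plen = 0;
        return NULL;
    }
    bounce_in_use = true;
    return bounce_buffer;
}

int main(void)
{
    size_t len = 4096, done = 0;

    bounce_in_use = true;          /* simulate a concurrent user */
    if (!map_region(&len)) {
        done += len;               /* safe: len is 0, not a stale 4096 */
    }
    assert(done == 0);
    return 0;
}
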
@@ -3602,11 +3608,11 @@ void *address_space_map(AddressSpace *as,
 }
 
 /* Unmaps a memory region previously mapped by address_space_map().
- * Will also mark the memory as dirty if is_write == 1.  access_len gives
+ * Will also mark the memory as dirty if is_write is true.  access_len gives
  * the amount of memory that was actually read or written by the caller.
  */
 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
-                         int is_write, hwaddr access_len)
+                         bool is_write, hwaddr access_len)
 {
     if (buffer != bounce.buffer) {
         MemoryRegion *mr;
@@ -3630,20 +3636,20 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     qemu_vfree(bounce.buffer);
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
-    atomic_mb_set(&bounce.in_use, false);
+    qatomic_mb_set(&bounce.in_use, false);
     cpu_notify_map_clients();
 }
 
 void *cpu_physical_memory_map(hwaddr addr,
                               hwaddr *plen,
-                              int is_write)
+                              bool is_write)
 {
     return address_space_map(&address_space_memory, addr, plen, is_write,
                              MEMTXATTRS_UNSPECIFIED);
 }
 
 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
-                               int is_write, hwaddr access_len)
+                               bool is_write, hwaddr access_len)
 {
     return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
 }
@@ -3654,7 +3660,7 @@ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
 #define TRANSLATE(...)   address_space_translate(as, __VA_ARGS__)
 #define RCU_READ_LOCK(...)    rcu_read_lock()
 #define RCU_READ_UNLOCK(...)  rcu_read_unlock()
-#include "memory_ldst.inc.c"
+#include "memory_ldst.c.inc"
 
 int64_t address_space_cache_init(MemoryRegionCache *cache,
                                  AddressSpace *as,
@@ -3751,7 +3757,7 @@ static inline MemoryRegion *address_space_translate_cached(
 /* Called from RCU critical section. address_space_read_cached uses this
  * out of line function when the target is an MMIO or IOMMU region.
  */
-void
+MemTxResult
 address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                void *buf, hwaddr len)
 {
@@ -3761,15 +3767,15 @@ address_space_read_cached_slow(MemoryRegionCache *cache, hwaddr addr,
     l = len;
     mr = address_space_translate_cached(cache, addr, &addr1, &l, false,
                                         MEMTXATTRS_UNSPECIFIED);
-    flatview_read_continue(cache->fv,
-                           addr, MEMTXATTRS_UNSPECIFIED, buf, len,
-                           addr1, l, mr);
+    return flatview_read_continue(cache->fv,
+                                  addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+                                  addr1, l, mr);
 }
 
 /* Called from RCU critical section. address_space_write_cached uses this
  * out of line function when the target is an MMIO or IOMMU region.
 */
-void
+MemTxResult
 address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
                                 const void *buf, hwaddr len)
 {
@@ -3779,9 +3785,9 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
     l = len;
     mr = address_space_translate_cached(cache, addr, &addr1, &l, true,
                                         MEMTXATTRS_UNSPECIFIED);
-    flatview_write_continue(cache->fv,
-                            addr, MEMTXATTRS_UNSPECIFIED, buf, len,
-                            addr1, l, mr);
+    return flatview_write_continue(cache->fv,
+                                   addr, MEMTXATTRS_UNSPECIFIED, buf, len,
+                                   addr1, l, mr);
 }
 
 #define ARG1_DECL                MemoryRegionCache *cache
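
Note on the void -> MemTxResult change: the cached slow-path accessors already received a result from flatview_read_continue()/flatview_write_continue() and dropped it; returning it lets callers react to failed transactions. The shape of the conversion on a stub (MEMTX_* values here are a sketch, not QEMU's header):

#include <assert.h>

typedef int MemTxResult;
enum { MEMTX_OK = 0, MEMTX_ERROR = 1 };

static MemTxResult backend_read(int fail)
{
    return fail ? MEMTX_ERROR : MEMTX_OK;
}

/* before: void read_cached_slow(...) { backend_read(...); }
 * -- the result died here */
static MemTxResult read_cached_slow(int fail)
{
    return backend_read(fail);     /* result now reaches the caller */
}

int main(void)
{
    assert(read_cached_slow(0) == MEMTX_OK);
    assert(read_cached_slow(1) != MEMTX_OK);
    return 0;
}
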
@@ -3790,19 +3796,21 @@ address_space_write_cached_slow(MemoryRegionCache *cache, hwaddr addr,
 #define ARG1                     cache
 #define SUFFIX                   _cached_slow
 #define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
 #define RCU_READ_LOCK()          ((void)0)
 #define RCU_READ_UNLOCK()        ((void)0)
-#include "memory_ldst.inc.c"
+#include "memory_ldst.c.inc"
 
 /* virtual memory access for debug (includes writing to ROM) */
 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
-                        uint8_t *buf, target_ulong len, int is_write)
+                        void *ptr, target_ulong len, bool is_write)
 {
     hwaddr phys_addr;
     target_ulong l, page;
+    uint8_t *buf = ptr;
 
     cpu_synchronize_state(cpu);
     while (len > 0) {
         int asidx;
         MemTxAttrs attrs;
+        MemTxResult res;
 
         page = addr & TARGET_PAGE_MASK;
         phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
@@ -3815,11 +3823,14 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
-            address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
-                                    attrs, buf, l);
+            res = address_space_write_rom(cpu->cpu_ases[asidx].as, phys_addr,
+                                          attrs, buf, l);
        } else {
-            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
-                             attrs, buf, l, 0);
+            res = address_space_read(cpu->cpu_ases[asidx].as, phys_addr,
+                                     attrs, buf, l);
+        }
+        if (res != MEMTX_OK) {
+            return -1;
        }
        len -= l;
        buf += l;
@@ -4083,4 +4094,58 @@ void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
     }
 }
 
+/*
+ * If positive, discarding RAM is disabled. If negative, discarding RAM is
+ * required to work and cannot be disabled.
+ */
+static int ram_block_discard_disabled;
+
+int ram_block_discard_disable(bool state)
+{
+    int old;
+
+    if (!state) {
+        qatomic_dec(&ram_block_discard_disabled);
+        return 0;
+    }
+
+    do {
+        old = qatomic_read(&ram_block_discard_disabled);
+        if (old < 0) {
+            return -EBUSY;
+        }
+    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+                             old, old + 1) != old);
+    return 0;
+}
+
+int ram_block_discard_require(bool state)
+{
+    int old;
+
+    if (!state) {
+        qatomic_inc(&ram_block_discard_disabled);
+        return 0;
+    }
+
+    do {
+        old = qatomic_read(&ram_block_discard_disabled);
+        if (old > 0) {
+            return -EBUSY;
+        }
+    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+                             old, old - 1) != old);
+    return 0;
+}
+
+bool ram_block_discard_is_disabled(void)
+{
+    return qatomic_read(&ram_block_discard_disabled) > 0;
+}
+
+bool ram_block_discard_is_required(void)
+{
+    return qatomic_read(&ram_block_discard_disabled) < 0;
+}
+
 #endif
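
Note on the new ram_block_discard_* primitives: a single signed counter encodes two mutually exclusive states -- positive means some user (e.g. one that pins guest pages) has disabled discarding, negative means some user requires it -- and the cmpxchg loop refuses to cross zero in the wrong direction. A standalone C11 rendition of the same logic, using plain stdatomic instead of QEMU's qatomic wrappers:

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int discard_state;   /* >0 disabled, <0 required */

static int discard_disable(bool state)
{
    int old = atomic_load(&discard_state);

    if (!state) {
        atomic_fetch_sub(&discard_state, 1);
        return 0;
    }
    do {
        if (old < 0) {
            return -EBUSY;         /* somebody requires discards */
        }
        /* on failure, 'old' is reloaded and the check reruns */
    } while (!atomic_compare_exchange_weak(&discard_state, &old, old + 1));
    return 0;
}

static int discard_require(bool state)
{
    int old = atomic_load(&discard_state);

    if (!state) {
        atomic_fetch_add(&discard_state, 1);
        return 0;
    }
    do {
        if (old > 0) {
            return -EBUSY;         /* somebody disabled discards */
        }
    } while (!atomic_compare_exchange_weak(&discard_state, &old, old - 1));
    return 0;
}

int main(void)
{
    assert(discard_disable(true) == 0);       /* first user disables */
    assert(discard_require(true) == -EBUSY);  /* requiring now fails */
    assert(discard_disable(false) == 0);      /* disabler goes away */
    assert(discard_require(true) == 0);       /* now it can be required */
    return 0;
}
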