X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=exec.c;h=95c4356c65a006e1f992e193bd19c45c9f593255;hb=8b7acc79b9adb4dda6cc867b90e3a1e873f4f7e8;hp=51c23692fb09f320943e151b70e81a226feea92a;hpb=e8f2f59aaf2978641b7e073ba623bd4b4a9e864d;p=qemu.git

diff --git a/exec.c b/exec.c
index 51c23692f..95c4356c6 100644
--- a/exec.c
+++ b/exec.c
@@ -129,7 +129,6 @@ static PhysPageMap next_map;
 
 static void io_mem_init(void);
 static void memory_map_init(void);
-static void *qemu_safe_ram_ptr(ram_addr_t addr);
 
 static MemoryRegion io_mem_watch;
 #endif
@@ -410,8 +409,10 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 #else
 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 {
-    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
-            (pc & ~TARGET_PAGE_MASK));
+    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
+    if (phys != -1) {
+        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
+    }
 }
 #endif
 #endif /* TARGET_HAS_ICE */
@@ -625,55 +626,40 @@ void cpu_abort(CPUArchState *env, const char *fmt, ...)
     abort();
 }
 
-CPUArchState *cpu_copy(CPUArchState *env)
+#if !defined(CONFIG_USER_ONLY)
+static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 {
-    CPUArchState *new_env = cpu_init(env->cpu_model_str);
-#if defined(TARGET_HAS_ICE)
-    CPUBreakpoint *bp;
-    CPUWatchpoint *wp;
-#endif
-
-    /* Reset non arch specific state */
-    cpu_reset(ENV_GET_CPU(new_env));
-
-    /* Copy arch specific state into the new CPU */
-    memcpy(new_env, env, sizeof(CPUArchState));
+    RAMBlock *block;
 
-    /* Clone all break/watchpoints.
-       Note: Once we support ptrace with hw-debug register access, make sure
-       BP_CPU break/watchpoints are handled correctly on clone. */
-    QTAILQ_INIT(&env->breakpoints);
-    QTAILQ_INIT(&env->watchpoints);
-#if defined(TARGET_HAS_ICE)
-    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
-        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
+    /* The list is protected by the iothread lock here.  */
+    block = ram_list.mru_block;
+    if (block && addr - block->offset < block->length) {
+        goto found;
     }
-    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
-        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
-                              wp->flags, NULL);
+    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+        if (addr - block->offset < block->length) {
+            goto found;
+        }
     }
-#endif
-
-    return new_env;
+
+    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+    abort();
+
+found:
+    ram_list.mru_block = block;
+    return block;
 }
 
-#if !defined(CONFIG_USER_ONLY)
 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                       uintptr_t length)
 {
-    uintptr_t start1;
+    RAMBlock *block;
+    ram_addr_t start1;
 
-    /* we modify the TLB cache so that the dirty bit will be set again
-       when accessing the range */
-    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
-    /* Check that we don't span multiple blocks - this breaks the
-       address comparisons below.  */
-    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
-        != (end - 1) - start) {
-        abort();
-    }
+    block = qemu_get_ram_block(start);
+    assert(block == qemu_get_ram_block(end - 1));
+    start1 = (uintptr_t)block->host + (start - block->offset);
     cpu_tlb_reset_dirty_all(start1, length);
-
 }
 
 /* Note: start and end must be within the same ram block.  */
@@ -749,14 +735,14 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                              uint16_t section);
 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
 
-static void *(*phys_mem_alloc)(ram_addr_t size) = qemu_anon_ram_alloc;
+static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
 
 /*
  * Set a custom physical guest memory alloator.
  * Accelerators with unusual needs may need this.  Hopefully, we can
  * get rid of it eventually.
  */
-void phys_mem_set_alloc(void *(*alloc)(ram_addr_t))
+void phys_mem_set_alloc(void *(*alloc)(size_t))
 {
     phys_mem_alloc = alloc;
 }
@@ -1301,29 +1287,6 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
 }
 #endif /* !_WIN32 */
 
-static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
-{
-    RAMBlock *block;
-
-    /* The list is protected by the iothread lock here.  */
-    block = ram_list.mru_block;
-    if (block && addr - block->offset < block->length) {
-        goto found;
-    }
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (addr - block->offset < block->length) {
-            goto found;
-        }
-    }
-
-    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
-    abort();
-
-found:
-    ram_list.mru_block = block;
-    return block;
-}
-
 /* Return a host pointer to ram allocated with qemu_ram_alloc.
    With the exception of the softmmu code in this file, this should
    only be used for local memory (e.g. video ram) that the device owns,
@@ -1351,40 +1314,6 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
     return block->host + (addr - block->offset);
 }
 
-/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
- * qemu_get_ram_ptr but do not touch ram_list.mru_block.
- *
- * ??? Is this still necessary?
- */
-static void *qemu_safe_ram_ptr(ram_addr_t addr)
-{
-    RAMBlock *block;
-
-    /* The list is protected by the iothread lock here.  */
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (addr - block->offset < block->length) {
-            if (xen_enabled()) {
-                /* We need to check if the requested address is in the RAM
-                 * because we don't want to map the entire memory in QEMU.
-                 * In that case just map until the end of the page.
-                 */
-                if (block->offset == 0) {
-                    return xen_map_cache(addr, 0, 0);
-                } else if (block->host == NULL) {
-                    block->host =
-                        xen_map_cache(block->offset, block->length, 1);
-                }
-            }
-            return block->host + (addr - block->offset);
-        }
-    }
-
-    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
-    abort();
-
-    return NULL;
-}
-
 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
  * but takes a size argument */
 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
@@ -2172,7 +2101,9 @@ void *address_space_map(AddressSpace *as,
         if (bounce.buffer) {
             return NULL;
         }
-        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+        /* Avoid unbounded allocations */
+        l = MIN(l, TARGET_PAGE_SIZE);
+        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
         bounce.addr = addr;
         bounce.len = l;
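Note (illustration, not part of the patch): the qemu_get_ram_block() helper that this diff moves above tlb_reset_dirty_range_all() fronts the linear walk of ram_list.blocks with a most-recently-used cache, so repeated lookups that hit the same RAMBlock skip the list walk. A minimal standalone sketch of that lookup pattern, using hypothetical Block/blocks/mru_block names instead of QEMU's RAMBlock/ram_list, might look like this:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for QEMU's RAMBlock and ram_list. */
typedef struct Block {
    uint64_t offset;            /* start of the block in guest RAM */
    uint64_t length;            /* size of the block in bytes */
    struct Block *next;
} Block;

static Block *blocks;           /* list of all blocks */
static Block *mru_block;        /* last block that satisfied a lookup */

/* Return the block covering addr: try the MRU entry first, then walk
 * the list, and remember the hit for the next call. */
static Block *get_block(uint64_t addr)
{
    Block *b = mru_block;

    if (b && addr - b->offset < b->length) {
        goto found;
    }
    for (b = blocks; b != NULL; b = b->next) {
        if (addr - b->offset < b->length) {
            goto found;
        }
    }
    fprintf(stderr, "bad offset %" PRIx64 "\n", addr);
    abort();

found:
    mru_block = b;
    return b;
}

int main(void)
{
    Block low  = { 0x0000, 0x1000, NULL };
    Block high = { 0x1000, 0x2000, &low };

    blocks = &high;
    assert(get_block(0x1800) == &high);  /* list walk, caches &high */
    assert(get_block(0x2f00) == &high);  /* served from mru_block */
    assert(get_block(0x0800) == &low);   /* MRU miss, falls back to the walk */
    return 0;
}

The unsigned comparison addr - b->offset < b->length is the same range check the patch uses: if addr is below the block's start, the subtraction wraps around to a huge value and the test fails, so a single comparison covers both bounds.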