X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=exec.c;h=b03b5bed81d0c80794536841b187c2c5308093bc;hb=2167f7bc289a90b043826961c2a3271298d51ef6;hp=81f08b78469e9af73501cf5b334e95f80cfc78b8;hpb=98fa4a593220c273f989de9b5af1ad2f132e14d5;p=qemu.git diff --git a/exec.c b/exec.c index 81f08b784..b03b5bed8 100644 --- a/exec.c +++ b/exec.c @@ -32,10 +32,10 @@ #include "hw/qdev.h" #include "osdep.h" #include "kvm.h" +#include "hw/xen.h" #include "qemu-timer.h" #if defined(CONFIG_USER_ONLY) #include -#include #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) #include #if __FreeBSD_version >= 700104 @@ -51,6 +51,9 @@ #include #endif #endif +#else /* !CONFIG_USER_ONLY */ +#include "xen-mapcache.h" +#include "trace.h" #endif //#define DEBUG_TB_INVALIDATE @@ -638,6 +641,9 @@ void cpu_exec_init(CPUState *env) env->numa_node = 0; QTAILQ_INIT(&env->breakpoints); QTAILQ_INIT(&env->watchpoints); +#ifndef CONFIG_USER_ONLY + env->thread_id = qemu_get_thread_id(); +#endif *penv = env; #if defined(CONFIG_USER_ONLY) cpu_list_unlock(); @@ -1067,8 +1073,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, restore the CPU state */ current_tb_modified = 1; - cpu_restore_state(current_tb, env, - env->mem_io_pc, NULL); + cpu_restore_state(current_tb, env, env->mem_io_pc); cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, ¤t_flags); } @@ -1176,7 +1181,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr, restore the CPU state */ current_tb_modified = 1; - cpu_restore_state(current_tb, env, pc, puc); + cpu_restore_state(current_tb, env, pc); cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, ¤t_flags); } @@ -1627,38 +1632,46 @@ static void cpu_unlink_tb(CPUState *env) spin_unlock(&interrupt_lock); } +#ifndef CONFIG_USER_ONLY /* mask must never be zero, except for A20 change call */ -void cpu_interrupt(CPUState *env, int mask) +static void tcg_handle_interrupt(CPUState *env, int mask) { int old_mask; old_mask = env->interrupt_request; env->interrupt_request |= mask; -#ifndef CONFIG_USER_ONLY /* * If called from iothread context, wake the target cpu in * case its halted. 
     */
-    if (!qemu_cpu_self(env)) {
+    if (!qemu_cpu_is_self(env)) {
         qemu_cpu_kick(env);
         return;
     }
-#endif

     if (use_icount) {
         env->icount_decr.u16.high = 0xffff;
-#ifndef CONFIG_USER_ONLY
         if (!can_do_io(env) && (mask & ~old_mask) != 0) {
             cpu_abort(env, "Raised interrupt while not in I/O function");
         }
-#endif
     } else {
         cpu_unlink_tb(env);
     }
 }

+CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
+
+#else /* CONFIG_USER_ONLY */
+
+void cpu_interrupt(CPUState *env, int mask)
+{
+    env->interrupt_request |= mask;
+    cpu_unlink_tb(env);
+}
+#endif /* CONFIG_USER_ONLY */
+
 void cpu_reset_interrupt(CPUState *env, int mask)
 {
     env->interrupt_request &= ~mask;
@@ -1708,11 +1721,12 @@ static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list

 static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                   ram_addr_t size,
-                                  ram_addr_t phys_offset)
+                                  ram_addr_t phys_offset,
+                                  bool log_dirty)
 {
     CPUPhysMemoryClient *client;
     QLIST_FOREACH(client, &memory_client_list, list) {
-        client->set_memory(client, start_addr, size, phys_offset);
+        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
     }
 }

@@ -1739,8 +1753,21 @@ static int cpu_notify_migration_log(int enable)
     return 0;
 }

-static void phys_page_for_each_1(CPUPhysMemoryClient *client,
-                                 int level, void **lp)
+struct last_map {
+    target_phys_addr_t start_addr;
+    ram_addr_t size;
+    ram_addr_t phys_offset;
+};
+
+/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
+ * address. Each intermediate table provides the next L2_BITs of guest
+ * physical address space. The number of levels varies based on host and
+ * guest configuration, making it efficient to build the final guest
+ * physical address by seeding the L1 offset and shifting and adding in
+ * each L2 offset as we recurse through them.
+ */
+static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
+                                 void **lp, target_phys_addr_t addr,
+                                 struct last_map *map)
 {
     int i;

@@ -1749,16 +1776,32 @@ static void phys_page_for_each_1(CPUPhysMemoryClient *client,
     }
     if (level == 0) {
         PhysPageDesc *pd = *lp;
+        addr <<= L2_BITS + TARGET_PAGE_BITS;
         for (i = 0; i < L2_SIZE; ++i) {
             if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
-                client->set_memory(client, pd[i].region_offset,
-                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
+                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
+
+                if (map->size &&
+                    start_addr == map->start_addr + map->size &&
+                    pd[i].phys_offset == map->phys_offset + map->size) {
+
+                    map->size += TARGET_PAGE_SIZE;
+                    continue;
+                } else if (map->size) {
+                    client->set_memory(client, map->start_addr,
+                                       map->size, map->phys_offset, false);
+                }
+
+                map->start_addr = start_addr;
+                map->size = TARGET_PAGE_SIZE;
+                map->phys_offset = pd[i].phys_offset;
             }
         }
     } else {
         void **pp = *lp;
         for (i = 0; i < L2_SIZE; ++i) {
-            phys_page_for_each_1(client, level - 1, pp + i);
+            phys_page_for_each_1(client, level - 1, pp + i,
+                                 (addr << L2_BITS) | i, map);
         }
     }
 }
@@ -1766,9 +1809,15 @@ static void phys_page_for_each_1(CPUPhysMemoryClient *client,
 static void phys_page_for_each(CPUPhysMemoryClient *client)
 {
     int i;
+    struct last_map map = { };
+
     for (i = 0; i < P_L1_SIZE; ++i) {
         phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
-                             l1_phys_map + i);
+                             l1_phys_map + i, i, &map);
+    }
+    if (map.size) {
+        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
+                           false);
     }
 }

@@ -2039,7 +2088,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
     start1 = (unsigned long)qemu_safe_ram_ptr(start);
-    /* Chek that we don't span multiple blocks - this breaks the
+    /* Check that we don't span multiple blocks - this breaks the
        address comparisons below.  */
     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
             != (end - 1) - start) {
@@ -2597,10 +2646,11 @@ static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
    start_addr and region_offset are rounded down to a page boundary
    before calculating this offset. This should not be a problem unless
    the low bits of start_addr and region_offset differ.
 */
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                          ram_addr_t size,
                                          ram_addr_t phys_offset,
-                                         ram_addr_t region_offset)
+                                         ram_addr_t region_offset,
+                                         bool log_dirty)
 {
     target_phys_addr_t addr, end_addr;
     PhysPageDesc *p;
@@ -2608,7 +2658,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     ram_addr_t orig_size = size;
     subpage_t *subpage;

-    cpu_notify_set_memory(start_addr, size, phys_offset);
+    assert(size);
+    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

     if (phys_offset == IO_MEM_UNASSIGNED) {
         region_offset = start_addr;
@@ -2616,7 +2667,9 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     region_offset &= TARGET_PAGE_MASK;
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;
-    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
+
+    addr = start_addr;
+    do {
         p = phys_page_find(addr >> TARGET_PAGE_BITS);
         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
             ram_addr_t orig_memory = p->phys_offset;
@@ -2668,7 +2721,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
             }
         }
         region_offset += TARGET_PAGE_SIZE;
-    }
+        addr += TARGET_PAGE_SIZE;
+    } while (addr != end_addr);

     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
@@ -2865,8 +2919,10 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
         }
     }

+    new_block->offset = find_ram_offset(size);
     if (host) {
         new_block->host = host;
+        new_block->flags |= RAM_PREALLOC_MASK;
     } else {
         if (mem_path) {
 #if defined (__linux__) && !defined(TARGET_S390X)
@@ -2881,18 +2937,28 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
 #endif
         } else {
 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
-            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
-            new_block->host = mmap((void*)0x1000000, size,
+            /* S390 KVM requires the topmost vma of the RAM to be smaller than
+               a system-defined value, which is at least 256GB. Larger systems
+               have larger values. We put the guest between the end of the data
+               segment (system break) and this value. We use 32GB as a base to
+               have enough room for the system break to grow.
*/ + new_block->host = mmap((void*)0x800000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, -1, 0); + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + if (new_block->host == MAP_FAILED) { + fprintf(stderr, "Allocating RAM failed\n"); + abort(); + } #else - new_block->host = qemu_vmalloc(size); + if (xen_mapcache_enabled()) { + xen_ram_alloc(new_block->offset, size); + } else { + new_block->host = qemu_vmalloc(size); + } #endif qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); } } - - new_block->offset = find_ram_offset(size); new_block->length = size; QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); @@ -2913,6 +2979,19 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) return qemu_ram_alloc_from_ptr(dev, name, size, NULL); } +void qemu_ram_free_from_ptr(ram_addr_t addr) +{ + RAMBlock *block; + + QLIST_FOREACH(block, &ram_list.blocks, next) { + if (addr == block->offset) { + QLIST_REMOVE(block, next); + qemu_free(block); + return; + } + } +} + void qemu_ram_free(ram_addr_t addr) { RAMBlock *block; @@ -2920,7 +2999,9 @@ void qemu_ram_free(ram_addr_t addr) QLIST_FOREACH(block, &ram_list.blocks, next) { if (addr == block->offset) { QLIST_REMOVE(block, next); - if (mem_path) { + if (block->flags & RAM_PREALLOC_MASK) { + ; + } else if (mem_path) { #if defined (__linux__) && !defined(TARGET_S390X) if (block->fd) { munmap(block->host, block->length); @@ -2928,12 +3009,18 @@ void qemu_ram_free(ram_addr_t addr) } else { qemu_vfree(block->host); } +#else + abort(); #endif } else { #if defined(TARGET_S390X) && defined(CONFIG_KVM) munmap(block->host, block->length); #else - qemu_vfree(block->host); + if (xen_mapcache_enabled()) { + qemu_invalidate_entry(block->host); + } else { + qemu_vfree(block->host); + } #endif } qemu_free(block); @@ -2943,6 +3030,66 @@ void qemu_ram_free(ram_addr_t addr) } +#ifndef _WIN32 +void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) +{ + RAMBlock *block; + ram_addr_t offset; + int flags; + void *area, *vaddr; + + QLIST_FOREACH(block, &ram_list.blocks, next) { + offset = addr - block->offset; + if (offset < block->length) { + vaddr = block->host + offset; + if (block->flags & RAM_PREALLOC_MASK) { + ; + } else { + flags = MAP_FIXED; + munmap(vaddr, length); + if (mem_path) { +#if defined(__linux__) && !defined(TARGET_S390X) + if (block->fd) { +#ifdef MAP_POPULATE + flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED : + MAP_PRIVATE; +#else + flags |= MAP_PRIVATE; +#endif + area = mmap(vaddr, length, PROT_READ | PROT_WRITE, + flags, block->fd, offset); + } else { + flags |= MAP_PRIVATE | MAP_ANONYMOUS; + area = mmap(vaddr, length, PROT_READ | PROT_WRITE, + flags, -1, 0); + } +#else + abort(); +#endif + } else { +#if defined(TARGET_S390X) && defined(CONFIG_KVM) + flags |= MAP_SHARED | MAP_ANONYMOUS; + area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE, + flags, -1, 0); +#else + flags |= MAP_PRIVATE | MAP_ANONYMOUS; + area = mmap(vaddr, length, PROT_READ | PROT_WRITE, + flags, -1, 0); +#endif + } + if (area != vaddr) { + fprintf(stderr, "Could not remap addr: %lx@%lx\n", + length, addr); + exit(1); + } + qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE); + } + return; + } + } +} +#endif /* !_WIN32 */ + /* Return a host pointer to ram allocated with qemu_ram_alloc. With the exception of the softmmu code in this file, this should only be used for local memory (e.g. 
video ram) that the device owns,
@@ -2962,6 +3109,17 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
             QLIST_REMOVE(block, next);
             QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
         }
+        if (xen_mapcache_enabled()) {
+            /* We need to check if the requested address is in the RAM
+             * because we don't want to map the entire memory in QEMU.
+             * In that case just map until the end of the page.
+             */
+            if (block->offset == 0) {
+                return qemu_map_cache(addr, 0, 0);
+            } else if (block->host == NULL) {
+                block->host = qemu_map_cache(block->offset, block->length, 1);
+            }
+        }
         return block->host + (addr - block->offset);
     }
 }
@@ -2981,6 +3139,17 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)

     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->length) {
+            if (xen_mapcache_enabled()) {
+                /* We need to check if the requested address is in the RAM
+                 * because we don't want to map the entire memory in QEMU.
+                 * In that case just map until the end of the page.
+                 */
+                if (block->offset == 0) {
+                    return qemu_map_cache(addr, 0, 0);
+                } else if (block->host == NULL) {
+                    block->host = qemu_map_cache(block->offset, block->length, 1);
+                }
+            }
             return block->host + (addr - block->offset);
         }
     }
@@ -2991,17 +3160,57 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
     return NULL;
 }

+/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
+ * but takes a size argument */
+void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
+{
+    if (xen_mapcache_enabled())
+        return qemu_map_cache(addr, *size, 1);
+    else {
+        RAMBlock *block;
+
+        QLIST_FOREACH(block, &ram_list.blocks, next) {
+            if (addr - block->offset < block->length) {
+                if (addr - block->offset + *size > block->length)
+                    *size = block->length - addr + block->offset;
+                return block->host + (addr - block->offset);
+            }
+        }
+
+        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+        abort();
+
+        *size = 0;
+        return NULL;
+    }
+}
+
+void qemu_put_ram_ptr(void *addr)
+{
+    trace_qemu_put_ram_ptr(addr);
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
     uint8_t *host = ptr;

+    if (xen_mapcache_enabled()) {
+        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+        return 0;
+    }
+
     QLIST_FOREACH(block, &ram_list.blocks, next) {
+        /* This case happens when the block is not mapped.
*/ + if (block->host == NULL) { + continue; + } if (host - block->host < block->length) { *ram_addr = block->offset + (host - block->host); return 0; } } + return -1; } @@ -3023,7 +3232,7 @@ static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 1); #endif return 0; @@ -3034,7 +3243,7 @@ static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr) #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 2); #endif return 0; @@ -3045,7 +3254,7 @@ static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr) #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 4); #endif return 0; @@ -3056,7 +3265,7 @@ static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_ #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 1); #endif } @@ -3066,7 +3275,7 @@ static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_ #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 2); #endif } @@ -3076,7 +3285,7 @@ static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_ #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif -#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) +#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 4); #endif } @@ -3194,7 +3403,7 @@ static void check_watchpoint(int offset, int len_mask, int flags) cpu_abort(env, "check_watchpoint: could not find TB for " "pc=%p", (void *)env->mem_io_pc); } - cpu_restore_state(tb, env, env->mem_io_pc, NULL); + cpu_restore_state(tb, env, env->mem_io_pc); tb_phys_invalidate(tb, -1); if (wp->flags & BP_STOP_BEFORE_ACCESS) { env->exception_index = EXCP_DEBUG; @@ -3696,6 +3905,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, cpu_physical_memory_set_dirty_flags( addr1, (0xff & ~CODE_DIRTY_FLAG)); } + qemu_put_ram_ptr(ptr); } } else { if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && @@ -3723,9 +3933,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } } else { /* RAM case */ - ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + - (addr & ~TARGET_PAGE_MASK); - memcpy(buf, ptr, l); + ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); + memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l); + qemu_put_ram_ptr(ptr); } } len -= l; @@ -3766,6 +3976,7 @@ void 
cpu_physical_memory_write_rom(target_phys_addr_t addr, /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); + qemu_put_ram_ptr(ptr); } len -= l; buf += l; @@ -3831,14 +4042,12 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, int is_write) { target_phys_addr_t len = *plen; - target_phys_addr_t done = 0; + target_phys_addr_t todo = 0; int l; - uint8_t *ret = NULL; - uint8_t *ptr; target_phys_addr_t page; unsigned long pd; PhysPageDesc *p; - unsigned long addr1; + target_phys_addr_t addr1 = addr; while (len > 0) { page = addr & TARGET_PAGE_MASK; @@ -3853,32 +4062,26 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, } if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { - if (done || bounce.buffer) { + if (todo || bounce.buffer) { break; } bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); bounce.addr = addr; bounce.len = l; if (!is_write) { - cpu_physical_memory_rw(addr, bounce.buffer, l, 0); + cpu_physical_memory_read(addr, bounce.buffer, l); } - ptr = bounce.buffer; - } else { - addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); - ptr = qemu_get_ram_ptr(addr1); - } - if (!done) { - ret = ptr; - } else if (ret + done != ptr) { - break; + + *plen = l; + return bounce.buffer; } len -= l; addr += l; - done += l; + todo += l; } - *plen = done; - return ret; + *plen = todo; + return qemu_ram_ptr_length(addr1, plen); } /* Unmaps a memory region previously mapped by cpu_physical_memory_map(). @@ -3907,6 +4110,9 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, access_len -= l; } } + if (xen_mapcache_enabled()) { + qemu_invalidate_entry(buffer); + } return; } if (is_write) { @@ -4181,7 +4387,7 @@ void stw_phys(target_phys_addr_t addr, uint32_t val) void stq_phys(target_phys_addr_t addr, uint64_t val) { val = tswap64(val); - cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); + cpu_physical_memory_write(addr, &val, 8); } /* virtual memory access for debug (includes writing to ROM) */ @@ -4229,7 +4435,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr) retaddr); } n = env->icount_decr.u16.low + tb->icount; - cpu_restore_state(tb, env, (unsigned long)retaddr, NULL); + cpu_restore_state(tb, env, (unsigned long)retaddr); /* Calculate how many instructions had been executed before the fault occurred. */ n = n - env->icount_decr.u16.low;
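
The struct last_map change above is worth restating: phys_page_for_each_1() no longer issues one set_memory() callback per populated page. It grows a pending region for as long as each page is contiguous with the previous one in both guest-physical address and phys_offset, flushes the pending region when the run breaks, and phys_page_for_each() performs a final flush. Below is a minimal standalone C sketch of the same coalescing pattern; PAGE_SZ, add_page() and the printing set_memory() stub are illustrative names, not QEMU APIs.

/* Standalone sketch (not QEMU code) of the last_map coalescing idea. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

struct last_map {
    uint64_t start_addr;   /* guest-physical start of the pending region */
    uint64_t size;         /* 0 means "no region pending" */
    uint64_t phys_offset;  /* backing offset of the pending region */
};

/* Stand-in for client->set_memory(): just report the merged region. */
static void set_memory(uint64_t start, uint64_t size, uint64_t phys)
{
    printf("region: start=0x%llx size=0x%llx phys=0x%llx\n",
           (unsigned long long)start, (unsigned long long)size,
           (unsigned long long)phys);
}

/* Feed one mapped page into the coalescer, flushing on discontinuity. */
static void add_page(struct last_map *map, uint64_t addr, uint64_t phys)
{
    if (map->size &&
        addr == map->start_addr + map->size &&
        phys == map->phys_offset + map->size) {
        map->size += PAGE_SZ;           /* page extends the pending region */
        return;
    }
    if (map->size) {
        set_memory(map->start_addr, map->size, map->phys_offset);
    }
    map->start_addr = addr;             /* start a new pending region */
    map->size = PAGE_SZ;
    map->phys_offset = phys;
}

int main(void)
{
    struct last_map map = { 0 };

    /* Three contiguous pages, then a discontiguous one. */
    add_page(&map, 0x0000, 0x10000);
    add_page(&map, 0x1000, 0x11000);
    add_page(&map, 0x2000, 0x12000);
    add_page(&map, 0x9000, 0x40000);

    if (map.size) {    /* final flush, as in phys_page_for_each() */
        set_memory(map.start_addr, map.size, map.phys_offset);
    }
    return 0;
}

Run as-is it reports one merged 12 KiB region for the first three pages and a separate 4 KiB region for the fourth, which is the batching behavior the patch introduces so that memory clients receive a few merged ranges instead of one callback per page.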
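
The cpu_physical_memory_map() rewrite also tightens that function's contract: each call now returns at most one contiguous mapping, either a direct pointer into guest RAM (possibly shortened by qemu_ram_ptr_length() at a block boundary) or the single global bounce buffer for MMIO, so callers have to loop and pair every mapping with cpu_physical_memory_unmap(). A hedged sketch of that calling pattern follows; dma_copy_from_guest() is a hypothetical helper written against the signatures visible in this diff, and it assumes the usual QEMU-internal headers are in scope.

/* Hypothetical caller, for illustration only: copy a guest-physical
 * range into a host buffer with the map/unmap pair from this diff.
 * Assumes QEMU-internal definitions (target_phys_addr_t, uint8_t,
 * cpu_physical_memory_map/unmap) are already available. */
static void dma_copy_from_guest(target_phys_addr_t addr, void *dest,
                                target_phys_addr_t len)
{
    uint8_t *out = dest;

    while (len > 0) {
        target_phys_addr_t plen = len;
        /* is_write == 0: we are reading guest memory */
        void *p = cpu_physical_memory_map(addr, &plen, 0);

        if (!p || plen == 0) {
            break;   /* nothing mappable right now; a real caller retries */
        }
        memcpy(out, p, plen);
        /* access_len == plen: every mapped byte was accessed */
        cpu_physical_memory_unmap(p, plen, 0, plen);
        addr += plen;
        out += plen;
        len -= plen;
    }
}

The checks before memcpy() are deliberately defensive: when the single bounce buffer is already claimed by another mapping the function can hand back nothing usable, and QEMU's own callers register with cpu_register_map_client() to be notified when the bounce buffer frees up before retrying.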