* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include "config.h"
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
-#include <sys/mman.h>
-#include "config.h"
#include "cpu.h"
#include "exec-all.h"
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
+//#define DEBUG_TLB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
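+/* one byte per target RAM page: nonzero if the page has been written
+   since the last call to cpu_physical_memory_reset_dirty() */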
+uint8_t *phys_ram_dirty;
typedef struct PageDesc {
- /* offset in memory of the page + io_index in the low 12 bits */
- unsigned long phys_offset;
- /* list of TBs intersecting this physical page */
+ /* list of TBs intersecting this ram page */
TranslationBlock *first_tb;
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
#endif
} PageDesc;
+typedef struct PhysPageDesc {
+ /* offset in host memory of the page + io_index in the low 12 bits */
+ unsigned long phys_offset;
+} PhysPageDesc;
+
typedef struct VirtPageDesc {
/* physical address of code page. It is valid only if 'valid_tag'
matches 'virt_valid_tag' */
static void io_mem_init(void);
-unsigned long real_host_page_size;
-unsigned long host_page_bits;
-unsigned long host_page_size;
-unsigned long host_page_mask;
+unsigned long qemu_real_host_page_size;
+unsigned long qemu_host_page_bits;
+unsigned long qemu_host_page_size;
+unsigned long qemu_host_page_mask;
+/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
+static PhysPageDesc *l1_phys_map[L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
+void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
/* log support */
static void page_init(void)
{
- /* NOTE: we can always suppose that host_page_size >=
+ /* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
- real_host_page_size = getpagesize();
- if (host_page_size == 0)
- host_page_size = real_host_page_size;
- if (host_page_size < TARGET_PAGE_SIZE)
- host_page_size = TARGET_PAGE_SIZE;
- host_page_bits = 0;
- while ((1 << host_page_bits) < host_page_size)
- host_page_bits++;
- host_page_mask = ~(host_page_size - 1);
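+    /* the statically allocated code buffer must be executable as well as
+       writable, so remap it read/write/exec on the host */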
+#ifdef _WIN32
+ {
+ SYSTEM_INFO system_info;
+ DWORD old_protect;
+
+ GetSystemInfo(&system_info);
+ qemu_real_host_page_size = system_info.dwPageSize;
+
+ VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
+ PAGE_EXECUTE_READWRITE, &old_protect);
+ }
+#else
+ qemu_real_host_page_size = getpagesize();
+ {
+ unsigned long start, end;
+
+ start = (unsigned long)code_gen_buffer;
+ start &= ~(qemu_real_host_page_size - 1);
+
+ end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
+ end += qemu_real_host_page_size - 1;
+ end &= ~(qemu_real_host_page_size - 1);
+
+ mprotect((void *)start, end - start,
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ }
+#endif
+
+ if (qemu_host_page_size == 0)
+ qemu_host_page_size = qemu_real_host_page_size;
+ if (qemu_host_page_size < TARGET_PAGE_SIZE)
+ qemu_host_page_size = TARGET_PAGE_SIZE;
+ qemu_host_page_bits = 0;
+ while ((1 << qemu_host_page_bits) < qemu_host_page_size)
+ qemu_host_page_bits++;
+ qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
virt_valid_tag = 1;
#endif
p = *lp;
if (!p) {
/* allocate if not found */
- p = malloc(sizeof(PageDesc) * L2_SIZE);
+ p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
memset(p, 0, sizeof(PageDesc) * L2_SIZE);
*lp = p;
}
return p + (index & (L2_SIZE - 1));
}
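+
+/* same lazily allocated two-level table as l1_map above, but indexed by
+   physical page number and holding only the phys_offset mapping */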
+static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
+{
+ PhysPageDesc **lp, *p;
+
+ lp = &l1_phys_map[index >> L2_BITS];
+ p = *lp;
+ if (!p) {
+ /* allocate if not found */
+ p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
+ memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
+ *lp = p;
+ }
+ return p + (index & (L2_SIZE - 1));
+}
+
+static inline PhysPageDesc *phys_page_find(unsigned int index)
+{
+ PhysPageDesc *p;
+
+ p = l1_phys_map[index >> L2_BITS];
+ if (!p)
+        return NULL;
+ return p + (index & (L2_SIZE - 1));
+}
+
#if !defined(CONFIG_USER_ONLY)
-static void tlb_protect_code(CPUState *env, uint32_t addr);
-static void tlb_unprotect_code(CPUState *env, uint32_t addr);
-static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
+static void tlb_protect_code(CPUState *env, target_ulong addr);
+static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
p = *lp;
if (!p) {
/* allocate if not found */
- p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
+ p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
*lp = p;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
if (p->code_bitmap) {
- free(p->code_bitmap);
+ qemu_free(p->code_bitmap);
p->code_bitmap = NULL;
}
p->code_write_count = 0;
nb_tbs,
nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
- /* must reset current TB so that interrupts cannot modify the
- links while we are modifying them */
- env->current_tb = NULL;
-
nb_tbs = 0;
for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
tb_hash[i] = NULL;
TranslationBlock *tb1, *tb2, **ptb;
tb_invalidated_flag = 1;
-
+
/* remove the TB from the hash list */
h = tb_hash_func(tb->pc);
ptb = &tb_hash[h];
int n, tb_start, tb_end;
TranslationBlock *tb;
- p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
+ p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
if (!p->code_bitmap)
return;
memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
}
}
+#ifdef TARGET_HAS_PRECISE_SMC
+
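+/* translate the code starting at 'pc' into a new TB and register it in
+   the physical page tables; the precise SMC code below uses this with
+   CF_SINGLE_INSN to rebuild the TB that is modifying its own page as a
+   single-instruction block */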
+static void tb_gen_code(CPUState *env,
+ target_ulong pc, target_ulong cs_base, int flags,
+ int cflags)
+{
+ TranslationBlock *tb;
+ uint8_t *tc_ptr;
+ target_ulong phys_pc, phys_page2, virt_page2;
+ int code_gen_size;
+
+ phys_pc = get_phys_addr_code(env, (unsigned long)pc);
+ tb = tb_alloc((unsigned long)pc);
+ if (!tb) {
+ /* flush must be done */
+ tb_flush(env);
+ /* cannot fail at this point */
+ tb = tb_alloc((unsigned long)pc);
+ }
+ tc_ptr = code_gen_ptr;
+ tb->tc_ptr = tc_ptr;
+ tb->cs_base = cs_base;
+ tb->flags = flags;
+ tb->cflags = cflags;
+ cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
+ code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
+
+ /* check next page if needed */
+ virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
+ phys_page2 = -1;
+ if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
+ phys_page2 = get_phys_addr_code(env, virt_page2);
+ }
+ tb_link_phys(tb, phys_pc, phys_page2);
+}
+#endif
+
/* invalidate all TBs which intersect with the target physical page
starting in range [start;end[. NOTE: start and end must refer to
- the same physical page */
-static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
-{
- int n;
+ the same physical page. 'is_cpu_write_access' should be true if called
+ from a real cpu write access: the virtual CPU will exit the current
+ TB if code is modified inside this TB. */
+void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
+ int is_cpu_write_access)
+{
+ int n, current_tb_modified, current_tb_not_found, current_flags;
+ CPUState *env = cpu_single_env;
PageDesc *p;
- TranslationBlock *tb, *tb_next;
+ TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
target_ulong tb_start, tb_end;
+ target_ulong current_pc, current_cs_base;
p = page_find(start >> TARGET_PAGE_BITS);
if (!p)
return;
if (!p->code_bitmap &&
- ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
+ ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
+ is_cpu_write_access) {
/* build code bitmap */
build_page_bitmap(p);
}
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all the code */
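+    /* for a real cpu write access, the TB containing the write is looked
+       up lazily (from env->mem_write_pc) only once an overlapping TB is
+       actually found */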
+ current_tb_not_found = is_cpu_write_access;
+ current_tb_modified = 0;
+ current_tb = NULL; /* avoid warning */
+ current_pc = 0; /* avoid warning */
+ current_cs_base = 0; /* avoid warning */
+ current_flags = 0; /* avoid warning */
tb = p->first_tb;
while (tb != NULL) {
n = (long)tb & 3;
tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
}
if (!(tb_end <= start || tb_start >= end)) {
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb_not_found) {
+ current_tb_not_found = 0;
+ current_tb = NULL;
+ if (env->mem_write_pc) {
+ /* now we have a real cpu fault */
+ current_tb = tb_find_pc(env->mem_write_pc);
+ }
+ }
+ if (current_tb == tb &&
+ !(current_tb->cflags & CF_SINGLE_INSN)) {
+ /* If we are modifying the current TB, we must stop
+ its execution. We could be more precise by checking
+ that the modification is after the current PC, but it
+ would require a specialized function to partially
+ restore the CPU state */
+
+ current_tb_modified = 1;
+ cpu_restore_state(current_tb, env,
+ env->mem_write_pc, NULL);
+#if defined(TARGET_I386)
+ current_flags = env->hflags;
+ current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+ current_cs_base = (target_ulong)env->segs[R_CS].base;
+ current_pc = current_cs_base + env->eip;
+#else
+#error unsupported CPU
+#endif
+ }
+#endif /* TARGET_HAS_PRECISE_SMC */
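+            /* clear current_tb while unlinking so that a cpu_interrupt()
+               arriving now cannot touch the TB being invalidated; any
+               interrupt raised meanwhile is re-posted afterwards */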
+ saved_tb = env->current_tb;
+ env->current_tb = NULL;
tb_phys_invalidate(tb, -1);
+ env->current_tb = saved_tb;
+ if (env->interrupt_request && env->current_tb)
+ cpu_interrupt(env, env->interrupt_request);
}
tb = tb_next;
}
/* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) {
invalidate_page_bitmap(p);
- tlb_unprotect_code_phys(cpu_single_env, start);
+ if (is_cpu_write_access) {
+ tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
+ }
+ }
+#endif
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb_modified) {
+ /* we generate a block containing just the instruction
+ modifying the memory. It will ensure that it cannot modify
+ itself */
+ env->current_tb = NULL;
+ tb_gen_code(env, current_pc, current_cs_base, current_flags,
+ CF_SINGLE_INSN);
+ cpu_resume_from_signal(env, NULL);
}
#endif
}
{
PageDesc *p;
int offset, b;
-
+#if 0
+ if (1) {
+ if (loglevel) {
+ fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
+ cpu_single_env->mem_write_vaddr, len,
+ cpu_single_env->eip,
+ cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
+ }
+ }
+#endif
p = page_find(start >> TARGET_PAGE_BITS);
if (!p)
return;
goto do_invalidate;
} else {
do_invalidate:
- tb_invalidate_phys_page_range(start, start + len);
- }
-}
-
-/* invalidate all TBs which intersect with the target virtual page
- starting in range [start;end[. This function is usually used when
- the target processor flushes its I-cache. NOTE: start and end must
- refer to the same physical page */
-void tb_invalidate_page_range(target_ulong start, target_ulong end)
-{
- int n;
- PageDesc *p;
- TranslationBlock *tb, *tb_next;
- target_ulong pc;
- target_ulong phys_start;
-
-#if !defined(CONFIG_USER_ONLY)
- {
- VirtPageDesc *vp;
- vp = virt_page_find(start >> TARGET_PAGE_BITS);
- if (!vp)
- return;
- if (vp->valid_tag != virt_valid_tag)
- return;
- phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
- }
-#else
- phys_start = start;
-#endif
- p = page_find(phys_start >> TARGET_PAGE_BITS);
- if (!p)
- return;
- /* we remove all the TBs in the range [start, end[ */
- /* XXX: see if in some cases it could be faster to invalidate all the code */
- tb = p->first_tb;
- while (tb != NULL) {
- n = (long)tb & 3;
- tb = (TranslationBlock *)((long)tb & ~3);
- tb_next = tb->page_next[n];
- pc = tb->pc;
- if (!((pc + tb->size) <= start || pc >= end)) {
- tb_phys_invalidate(tb, -1);
- }
- tb = tb_next;
+ tb_invalidate_phys_page_range(start, start + len, 1);
}
-#if !defined(CONFIG_USER_ONLY)
- /* if no code remaining, no need to continue to use slow writes */
- if (!p->first_tb)
- tlb_unprotect_code(cpu_single_env, start);
-#endif
}
#if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_ulong addr)
+static void tb_invalidate_phys_page(target_ulong addr,
+ unsigned long pc, void *puc)
{
- int n;
+ int n, current_flags, current_tb_modified;
+ target_ulong current_pc, current_cs_base;
PageDesc *p;
- TranslationBlock *tb;
+ TranslationBlock *tb, *current_tb;
+#ifdef TARGET_HAS_PRECISE_SMC
+ CPUState *env = cpu_single_env;
+#endif
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p)
return;
tb = p->first_tb;
+ current_tb_modified = 0;
+ current_tb = NULL;
+ current_pc = 0; /* avoid warning */
+ current_cs_base = 0; /* avoid warning */
+ current_flags = 0; /* avoid warning */
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (tb && pc != 0) {
+ current_tb = tb_find_pc(pc);
+ }
+#endif
while (tb != NULL) {
n = (long)tb & 3;
tb = (TranslationBlock *)((long)tb & ~3);
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb == tb &&
+ !(current_tb->cflags & CF_SINGLE_INSN)) {
+ /* If we are modifying the current TB, we must stop
+ its execution. We could be more precise by checking
+ that the modification is after the current PC, but it
+ would require a specialized function to partially
+ restore the CPU state */
+
+ current_tb_modified = 1;
+ cpu_restore_state(current_tb, env, pc, puc);
+#if defined(TARGET_I386)
+ current_flags = env->hflags;
+ current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
+ current_cs_base = (target_ulong)env->segs[R_CS].base;
+ current_pc = current_cs_base + env->eip;
+#else
+#error unsupported CPU
+#endif
+ }
+#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
tb = tb->page_next[n];
}
p->first_tb = NULL;
+#ifdef TARGET_HAS_PRECISE_SMC
+ if (current_tb_modified) {
+ /* we generate a block containing just the instruction
+ modifying the memory. It will ensure that it cannot modify
+ itself */
+ env->current_tb = NULL;
+ tb_gen_code(env, current_pc, current_cs_base, current_flags,
+ CF_SINGLE_INSN);
+ cpu_resume_from_signal(env, puc);
+ }
+#endif
}
#endif
p->first_tb = (TranslationBlock *)((long)tb | n);
invalidate_page_bitmap(p);
+#if defined(TARGET_HAS_SMC) || 1
+
#if defined(CONFIG_USER_ONLY)
if (p->flags & PAGE_WRITE) {
unsigned long host_start, host_end, addr;
/* force the host page as non writable (writes will have a
page fault + mprotect overhead) */
- host_start = page_addr & host_page_mask;
- host_end = host_start + host_page_size;
+ host_start = page_addr & qemu_host_page_mask;
+ host_end = host_start + qemu_host_page_size;
prot = 0;
for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
prot |= page_get_flags(addr);
- mprotect((void *)host_start, host_page_size,
+ mprotect((void *)host_start, qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
printf("protecting code page: 0x%08lx\n",
tlb_protect_code(cpu_single_env, virt_addr);
}
#endif
+
+#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
return NULL;
tb = &tbs[nb_tbs++];
tb->pc = pc;
+ tb->cflags = 0;
return tb;
}
/* save the code memory mappings (needed to invalidate the code) */
addr = tb->pc & TARGET_PAGE_MASK;
vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
+#ifdef DEBUG_TLB_CHECK
+ if (vp->valid_tag == virt_valid_tag &&
+ vp->phys_addr != tb->page_addr[0]) {
+ printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
+ addr, tb->page_addr[0], vp->phys_addr);
+ }
+#endif
vp->phys_addr = tb->page_addr[0];
- vp->valid_tag = virt_valid_tag;
+ if (vp->valid_tag != virt_valid_tag) {
+ vp->valid_tag = virt_valid_tag;
+#if !defined(CONFIG_SOFTMMU)
+ vp->prot = 0;
+#endif
+ }
if (tb->page_addr[1] != -1) {
addr += TARGET_PAGE_SIZE;
vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
+#ifdef DEBUG_TLB_CHECK
+ if (vp->valid_tag == virt_valid_tag &&
+ vp->phys_addr != tb->page_addr[1]) {
+ printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
+ addr, tb->page_addr[1], vp->phys_addr);
+ }
+#endif
vp->phys_addr = tb->page_addr[1];
- vp->valid_tag = virt_valid_tag;
+ if (vp->valid_tag != virt_valid_tag) {
+ vp->valid_tag = virt_valid_tag;
+#if !defined(CONFIG_SOFTMMU)
+ vp->prot = 0;
+#endif
+ }
}
}
#endif
tb->jmp_first = (TranslationBlock *)((long)tb | 2);
tb->jmp_next[0] = NULL;
tb->jmp_next[1] = NULL;
+#ifdef USE_CODE_COPY
+ tb->cflags &= ~CF_FP_USED;
+ if (tb->cflags & CF_TB_FP_USED)
+ tb->cflags |= CF_FP_USED;
+#endif
/* init original jump addresses */
if (tb->tb_next_offset[0] != 0xffff)
tb_reset_jump_recursive2(tb, 1);
}
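+
+/* invalidate the TB containing a breakpoint: the virtual pc is first
+   translated to a physical address, since TBs are now indexed by
+   physical page */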
+static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+{
+ target_ulong phys_addr;
+
+ phys_addr = cpu_get_phys_page_debug(env, pc);
+ tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
+}
+
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
breakpoint is reached */
-int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
+int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
-#if defined(TARGET_I386)
+#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
int i;
-
+
for(i = 0; i < env->nb_breakpoints; i++) {
if (env->breakpoints[i] == pc)
return 0;
if (env->nb_breakpoints >= MAX_BREAKPOINTS)
return -1;
env->breakpoints[env->nb_breakpoints++] = pc;
- tb_invalidate_page_range(pc, pc + 1);
+
+ breakpoint_invalidate(env, pc);
return 0;
#else
return -1;
}
/* remove a breakpoint */
-int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
+int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
-#if defined(TARGET_I386)
+#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
int i;
for(i = 0; i < env->nb_breakpoints; i++) {
if (env->breakpoints[i] == pc)
memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
(env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
env->nb_breakpoints--;
- tb_invalidate_page_range(pc, pc + 1);
+
+ breakpoint_invalidate(env, pc);
return 0;
#else
return -1;
CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
-#if defined(TARGET_I386)
+#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
if (env->singlestep_enabled != enabled) {
env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
void cpu_interrupt(CPUState *env, int mask)
{
TranslationBlock *tb;
-
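+    /* serializes the TB unlinking below: cpu_interrupt() may be reentered,
+       e.g. from a signal handler, while the jump chains are being reset */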
+ static int interrupt_lock;
+
env->interrupt_request |= mask;
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
tb = env->current_tb;
- if (tb) {
+ if (tb && !testandset(&interrupt_lock)) {
+ env->current_tb = NULL;
tb_reset_jump_recursive(tb);
+ interrupt_lock = 0;
}
}
+void cpu_reset_interrupt(CPUState *env, int mask)
+{
+ env->interrupt_request &= ~mask;
+}
+
+CPULogItem cpu_log_items[] = {
+ { CPU_LOG_TB_OUT_ASM, "out_asm",
+ "show generated host assembly code for each compiled TB" },
+ { CPU_LOG_TB_IN_ASM, "in_asm",
+ "show target assembly code for each compiled TB" },
+ { CPU_LOG_TB_OP, "op",
+ "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
+#ifdef TARGET_I386
+ { CPU_LOG_TB_OP_OPT, "op_opt",
+ "show micro ops after optimization for each compiled TB" },
+#endif
+ { CPU_LOG_INT, "int",
+ "show interrupts/exceptions in short format" },
+ { CPU_LOG_EXEC, "exec",
+ "show trace before each executed TB (lots of logs)" },
+ { CPU_LOG_TB_CPU, "cpu",
+      "show CPU state before block translation" },
+#ifdef TARGET_I386
+ { CPU_LOG_PCALL, "pcall",
+ "show protected mode far calls/returns/exceptions" },
+#endif
+ { CPU_LOG_IOPORT, "ioport",
+ "show all i/o ports accesses" },
+ { 0, NULL, NULL },
+};
+
+static int cmp1(const char *s1, int n, const char *s2)
+{
+ if (strlen(s2) != n)
+ return 0;
+ return memcmp(s1, s2, n) == 0;
+}
+
+/* takes a comma-separated list of log masks. Returns 0 on error. */
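+/* e.g. "in_asm,op" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP */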
+int cpu_str_to_log_mask(const char *str)
+{
+ CPULogItem *item;
+ int mask;
+ const char *p, *p1;
+
+ p = str;
+ mask = 0;
+ for(;;) {
+ p1 = strchr(p, ',');
+ if (!p1)
+ p1 = p + strlen(p);
+ for(item = cpu_log_items; item->mask != 0; item++) {
+ if (cmp1(p, p1 - p, item->name))
+ goto found;
+ }
+ return 0;
+ found:
+ mask |= item->mask;
+ if (*p1 != ',')
+ break;
+ p = p1 + 1;
+ }
+ return mask;
+}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
#if !defined(CONFIG_USER_ONLY)
-void tlb_flush(CPUState *env)
+/* NOTE: if flush_global is true, also flush global entries (not
+ implemented yet) */
+void tlb_flush(CPUState *env, int flush_global)
{
int i;
#endif
}
-static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->address &
(TARGET_PAGE_MASK | TLB_INVALID_MASK)))
tlb_entry->address = -1;
}
-void tlb_flush_page(CPUState *env, uint32_t addr)
+void tlb_flush_page(CPUState *env, target_ulong addr)
{
int i, n;
VirtPageDesc *vp;
tb = tb->page_next[n];
}
}
+ vp->valid_tag = 0;
}
#if !defined(CONFIG_SOFTMMU)
#endif
}
-static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
+static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
if (addr == (tlb_entry->address &
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
- (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
- tlb_entry->address |= IO_MEM_CODE;
- tlb_entry->addend -= (unsigned long)phys_ram_base;
+ (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
+ (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
+ tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
}
}
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
-static void tlb_protect_code(CPUState *env, uint32_t addr)
+static void tlb_protect_code(CPUState *env, target_ulong addr)
{
int i;
#endif
}
-static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
+static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
+ unsigned long phys_addr)
{
- if (addr == (tlb_entry->address &
- (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
- (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
- tlb_entry->address &= TARGET_PAGE_MASK;
- tlb_entry->addend += (unsigned long)phys_ram_base;
+ if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
+ ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
+ tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
}
}
-/* update the TLB so that writes in virtual page 'addr' are no longer
-   tested self modifying code */
+/* update the TLB so that writes in physical page 'phys_addr' are no longer
+   tested for self-modifying code */
-static void tlb_unprotect_code(CPUState *env, uint32_t addr)
+static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
int i;
- addr &= TARGET_PAGE_MASK;
- i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_unprotect_code1(&env->tlb_write[0][i], addr);
- tlb_unprotect_code1(&env->tlb_write[1][i], addr);
+ phys_addr &= TARGET_PAGE_MASK;
+ phys_addr += (long)phys_ram_base;
+ i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
+ tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
-static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
- uint32_t phys_addr)
+static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
+ unsigned long start, unsigned long length)
{
- if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
- ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
- tlb_entry->address &= TARGET_PAGE_MASK;
- tlb_entry->addend += (unsigned long)phys_ram_base;
+ unsigned long addr;
+ if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+ addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+ if ((addr - start) < length) {
+ tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+ }
}
}
-/* update the TLB so that writes in physical page 'phys_addr' are no longer
- tested self modifying code */
-/* XXX: find a way to improve it */
-static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
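+/* reset the dirty flags for the physical range [start, end[ and make the
+   next write to each page go through the slow path again. Typical use
+   (not shown in this patch): a frame buffer device scans the dirty flags
+   for its range, redraws the modified pages, then resets them here */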
+void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
+ CPUState *env;
+ unsigned long length, start1;
int i;
- phys_addr &= TARGET_PAGE_MASK;
+ start &= TARGET_PAGE_MASK;
+ end = TARGET_PAGE_ALIGN(end);
+
+ length = end - start;
+ if (length == 0)
+ return;
+ memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
+
+ env = cpu_single_env;
+ /* we modify the TLB cache so that the dirty bit will be set again
+ when accessing the range */
+ start1 = start + (unsigned long)phys_ram_base;
for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
+ tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
for(i = 0; i < CPU_TLB_SIZE; i++)
- tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
+ tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
+
+#if !defined(CONFIG_SOFTMMU)
+ /* XXX: this is expensive */
+ {
+ VirtPageDesc *p;
+ int j;
+ target_ulong addr;
+
+ for(i = 0; i < L1_SIZE; i++) {
+ p = l1_virt_map[i];
+ if (p) {
+ addr = i << (TARGET_PAGE_BITS + L2_BITS);
+ for(j = 0; j < L2_SIZE; j++) {
+ if (p->valid_tag == virt_valid_tag &&
+ p->phys_addr >= start && p->phys_addr < end &&
+ (p->prot & PROT_WRITE)) {
+ if (addr < MMAP_AREA_END) {
+ mprotect((void *)addr, TARGET_PAGE_SIZE,
+ p->prot & ~PROT_WRITE);
+ }
+ }
+ addr += TARGET_PAGE_SIZE;
+ p++;
+ }
+ }
+ }
+ }
+#endif
+}
+
+static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
+ unsigned long start)
+{
+ unsigned long addr;
+ if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
+ addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
+ if (addr == start) {
+ tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
+ }
+ }
+}
+
+/* update the TLB corresponding to virtual page vaddr and phys addr
+ addr so that it is no longer dirty */
+static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+{
+ CPUState *env = cpu_single_env;
+ int i;
+
+ phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
+
+ addr &= TARGET_PAGE_MASK;
+ i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ tlb_set_dirty1(&env->tlb_write[0][i], addr);
+ tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
-/* add a new TLB entry. At most a single entry for a given virtual
- address is permitted. */
-int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
+/* add a new TLB entry. At most one entry for a given virtual address
+ is permitted. Return 0 if OK or 2 if the page could not be mapped
+   (can only happen in non-SOFTMMU mode for I/O pages or pages
+ conflicting with the host address space). */
+int tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
int is_user, int is_softmmu)
{
- PageDesc *p;
- target_ulong pd;
+ PhysPageDesc *p;
+ unsigned long pd;
TranslationBlock *first_tb;
unsigned int index;
- target_ulong address, addend;
+ target_ulong address;
+ unsigned long addend;
int ret;
- p = page_find(paddr >> TARGET_PAGE_BITS);
+ p = phys_page_find(paddr >> TARGET_PAGE_BITS);
+ first_tb = NULL;
if (!p) {
pd = IO_MEM_UNASSIGNED;
- first_tb = NULL;
} else {
+ PageDesc *p1;
pd = p->phys_offset;
- first_tb = p->first_tb;
+ if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
+ /* NOTE: we also allocate the page at this stage */
+ p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
+ first_tb = p1->first_tb;
+ }
}
#if defined(DEBUG_TLB)
printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
addend -= vaddr;
- if (prot & PROT_READ) {
+ if (prot & PAGE_READ) {
env->tlb_read[is_user][index].address = address;
env->tlb_read[is_user][index].addend = addend;
} else {
env->tlb_read[is_user][index].address = -1;
env->tlb_read[is_user][index].addend = -1;
}
- if (prot & PROT_WRITE) {
+ if (prot & PAGE_WRITE) {
if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
/* ROM: access is ignored (same as unassigned) */
env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
- env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
- } else if (first_tb) {
+ env->tlb_write[is_user][index].addend = addend;
+ } else
+ /* XXX: the PowerPC code seems not ready to handle
+ self modifying code with DCBI */
+#if defined(TARGET_HAS_SMC) || 1
+ if (first_tb) {
/* if code is present, we use a specific memory
handler. It works only for physical memory access */
env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
- env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
+ env->tlb_write[is_user][index].addend = addend;
+ } else
+#endif
+ if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+ !cpu_physical_memory_is_dirty(pd)) {
+ env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
+ env->tlb_write[is_user][index].addend = addend;
} else {
env->tlb_write[is_user][index].address = address;
env->tlb_write[is_user][index].addend = addend;
ret = 2;
} else {
void *map_addr;
- if (prot & PROT_WRITE) {
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
- /* ROM: we do as if code was inside */
- /* if code is present, we only map as read only and save the
- original mapping */
- VirtPageDesc *vp;
-
- vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
- vp->phys_addr = pd;
- vp->prot = prot;
- vp->valid_tag = virt_valid_tag;
- prot &= ~PAGE_WRITE;
+
+ if (vaddr >= MMAP_AREA_END) {
+ ret = 2;
+ } else {
+ if (prot & PROT_WRITE) {
+ if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+#if defined(TARGET_HAS_SMC) || 1
+ first_tb ||
+#endif
+ ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+ !cpu_physical_memory_is_dirty(pd))) {
+ /* ROM: we do as if code was inside */
+ /* if code is present, we only map as read only and save the
+ original mapping */
+ VirtPageDesc *vp;
+
+ vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
+ vp->phys_addr = pd;
+ vp->prot = prot;
+ vp->valid_tag = virt_valid_tag;
+ prot &= ~PAGE_WRITE;
+ }
+ }
+ map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
+ MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
+ if (map_addr == MAP_FAILED) {
+            cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
+ paddr, vaddr);
}
- }
- map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
- MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
- if (map_addr == MAP_FAILED) {
- cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
- paddr, vaddr);
}
}
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(unsigned long addr)
+int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
VirtPageDesc *vp;
printf("page_unprotect: addr=0x%08x\n", addr);
#endif
addr &= TARGET_PAGE_MASK;
+
+ /* if it is not mapped, no need to worry here */
+ if (addr >= MMAP_AREA_END)
+ return 0;
vp = virt_page_find(addr >> TARGET_PAGE_BITS);
if (!vp)
return 0;
printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
addr, vp->phys_addr, vp->prot);
#endif
- tb_invalidate_phys_page(vp->phys_addr);
- mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
+ if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
+ cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
+ (unsigned long)addr, vp->prot);
+ /* set the dirty bit */
+ phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
+ /* flush the code inside */
+ tb_invalidate_phys_page(vp->phys_addr, pc, puc);
return 1;
#else
return 0;
#else
-void tlb_flush(CPUState *env)
-{
-}
-
-void tlb_flush_page(CPUState *env, uint32_t addr)
+void tlb_flush(CPUState *env, int flush_global)
{
}
-void tlb_flush_page_write(CPUState *env, uint32_t addr)
+void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
-int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
+int tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
int is_user, int is_softmmu)
{
return 0;
if (!(p->flags & PAGE_WRITE) &&
(flags & PAGE_WRITE) &&
p->first_tb) {
- tb_invalidate_phys_page(addr);
+ tb_invalidate_phys_page(addr, 0, NULL);
}
p->flags = flags;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(unsigned long address)
+int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
unsigned int page_index, prot, pindex;
PageDesc *p, *p1;
unsigned long host_start, host_end, addr;
- host_start = address & host_page_mask;
+ host_start = address & qemu_host_page_mask;
page_index = host_start >> TARGET_PAGE_BITS;
p1 = page_find(page_index);
if (!p1)
return 0;
- host_end = host_start + host_page_size;
+ host_end = host_start + qemu_host_page_size;
p = p1;
prot = 0;
for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
if (prot & PAGE_WRITE_ORG) {
pindex = (address - host_start) >> TARGET_PAGE_BITS;
if (!(p1[pindex].flags & PAGE_WRITE)) {
- mprotect((void *)host_start, host_page_size,
+ mprotect((void *)host_start, qemu_host_page_size,
(prot & PAGE_BITS) | PAGE_WRITE);
p1[pindex].flags |= PAGE_WRITE;
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
- tb_invalidate_phys_page(address);
+ tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
tb_invalidate_check(address);
#endif
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- page_unprotect(addr);
+ page_unprotect(addr, 0, NULL);
}
}
+static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
+{
+}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page */
-void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
- long phys_offset)
+void cpu_register_physical_memory(target_phys_addr_t start_addr,
+ unsigned long size,
+ unsigned long phys_offset)
{
unsigned long addr, end_addr;
- PageDesc *p;
+ PhysPageDesc *p;
+ size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + size;
- for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
- p = page_find_alloc(addr >> TARGET_PAGE_BITS);
+ for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
+ p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
p->phys_offset = phys_offset;
if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
phys_offset += TARGET_PAGE_SIZE;
}
}
-static uint32_t unassigned_mem_readb(uint32_t addr)
+static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
return 0;
}
-static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
+static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
/* self modifying code support in soft mmu mode : writing to a page
containing code comes to these functions */
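+/* NOTE: 'addr' is a host ram address here (the TLB addend points inside
+   phys_ram_base): subtract the base to recover the ram offset */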
-static void code_mem_writeb(uint32_t addr, uint32_t val)
+static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
+ unsigned long phys_addr;
+
+ phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
- tb_invalidate_phys_page_fast(addr, 1);
+ tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
- stb_raw(phys_ram_base + addr, val);
+ stb_raw((uint8_t *)addr, val);
+ phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
-static void code_mem_writew(uint32_t addr, uint32_t val)
+static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
+ unsigned long phys_addr;
+
+ phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
- tb_invalidate_phys_page_fast(addr, 2);
+ tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
- stw_raw(phys_ram_base + addr, val);
+ stw_raw((uint8_t *)addr, val);
+ phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
-static void code_mem_writel(uint32_t addr, uint32_t val)
+static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
+ unsigned long phys_addr;
+
+ phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
- tb_invalidate_phys_page_fast(addr, 4);
+ tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
- stl_raw(phys_ram_base + addr, val);
+ stl_raw((uint8_t *)addr, val);
+ phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
static CPUReadMemoryFunc *code_mem_read[3] = {
code_mem_writel,
};
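+
+/* the first write to a clean RAM page is routed here: perform the raw
+   store, mark the page dirty and switch the TLB entry back to fast RAM
+   writes, so only the first write per page takes the slow path */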
+static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+ stb_raw((uint8_t *)addr, val);
+ tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+}
+
+static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+ stw_raw((uint8_t *)addr, val);
+ tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+}
+
+static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+ stl_raw((uint8_t *)addr, val);
+ tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
+}
+
+static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
+ notdirty_mem_writeb,
+ notdirty_mem_writew,
+ notdirty_mem_writel,
+};
+
static void io_mem_init(void)
{
- cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
- cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
- cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
- io_mem_nb = 4;
+ cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
+ cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
+ cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
+ cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
+ io_mem_nb = 5;
+
+ /* alloc dirty bits array */
+ phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
CPUReadMemoryFunc **mem_read,
- CPUWriteMemoryFunc **mem_write)
+ CPUWriteMemoryFunc **mem_write,
+ void *opaque)
{
int i;
io_mem_read[io_index][i] = mem_read[i];
io_mem_write[io_index][i] = mem_write[i];
}
+ io_mem_opaque[io_index] = opaque;
return io_index << IO_MEM_SHIFT;
}
+/* physical memory access (slow version, mainly for debug) */
+#if defined(CONFIG_USER_ONLY)
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+ int len, int is_write)
+{
+ int l, flags;
+ target_ulong page;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ flags = page_get_flags(page);
+ if (!(flags & PAGE_VALID))
+ return;
+ if (is_write) {
+ if (!(flags & PAGE_WRITE))
+ return;
+            memcpy((uint8_t *)addr, buf, l);
+ } else {
+ if (!(flags & PAGE_READ))
+ return;
+            memcpy(buf, (uint8_t *)addr, l);
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+}
+#else
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+ int len, int is_write)
+{
+ int l, io_index;
+ uint8_t *ptr;
+ uint32_t val;
+ target_phys_addr_t page;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ p = phys_page_find(page >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if (is_write) {
+ if ((pd & ~TARGET_PAGE_MASK) != 0) {
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (l >= 4 && ((addr & 3) == 0)) {
+                    /* 32 bit write access */
+ val = ldl_raw(buf);
+ io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+                    /* 16 bit write access */
+ val = lduw_raw(buf);
+ io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ l = 2;
+ } else {
+ /* 8 bit access */
+ val = ldub_raw(buf);
+ io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
+ l = 1;
+ }
+ } else {
+ unsigned long addr1;
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ /* RAM case */
+ ptr = phys_ram_base + addr1;
+ memcpy(ptr, buf, l);
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
+ }
+ } else {
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+ (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
+ /* I/O case */
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit read access */
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ stl_raw(buf, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit read access */
+ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ stw_raw(buf, val);
+ l = 2;
+ } else {
+ /* 8 bit access */
+ val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
+ stb_raw(buf, val);
+ l = 1;
+ }
+ } else {
+ /* RAM case */
+ ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ memcpy(buf, ptr, l);
+ }
+ }
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+}
+#endif
+
+/* virtual memory access for debug */
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+ uint8_t *buf, int len, int is_write)
+{
+ int l;
+ target_ulong page, phys_addr;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ phys_addr = cpu_get_phys_page_debug(env, page);
+ /* if no physical page mapped, return an error */
+ if (phys_addr == -1)
+ return -1;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
+ buf, l, is_write);
+ len -= l;
+ buf += l;
+ addr += l;
+ }
+ return 0;
+}
+
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _cmmu