cpu->current_tb = NULL;
memset(env->tlb_table, -1, sizeof(env->tlb_table));
+ memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+ env->vtlb_index = 0;
env->tlb_flush_addr = -1;
env->tlb_flush_mask = 0;
tlb_flush_count++;
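
This hunk is tlb_flush() in cputlb.c: a full flush now also wipes the victim
table and resets the round-robin pointer. The memset with -1 works as an
invalidation because every comparator field becomes all-ones, while a lookup
tag is always page-aligned (low bits clear), so no address can ever match. A
minimal standalone illustration; the 64-bit target_ulong and 4 KiB page size
are assumptions for the demo, not taken from the patch:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint64_t target_ulong;               /* assumption: 64-bit */
    #define TARGET_PAGE_BITS 12                  /* assumption: 4 KiB  */
    #define TARGET_PAGE_MASK ((target_ulong)-1 << TARGET_PAGE_BITS)

    int main(void)
    {
        target_ulong tag;
        memset(&tag, -1, sizeof(tag));  /* what tlb_flush() does per field */

        /* Page-aligned tags have their low bits clear, so they can never
         * equal the all-ones pattern: the entry can never hit. */
        for (target_ulong a = 0; a < (target_ulong)1 << 20;
             a += (target_ulong)1 << TARGET_PAGE_BITS) {
            assert(tag != (a & TARGET_PAGE_MASK));
        }
        return 0;
    }
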
tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
}
+ /* check whether there are entries that need to be flushed in the vtlb */
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ int k;
+ for (k = 0; k < CPU_VTLB_SIZE; k++) {
+ tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
+ }
+ }
+
tb_flush_jmp_cache(cpu, addr);
}
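
This is the single-page flush path (tlb_flush_page() in cputlb.c). The main
table is direct-mapped, so one slot per MMU mode suffices there, but the
victim table is fully associative: a page flush has to sweep all
CPU_VTLB_SIZE slots in every mode. The pre-existing helper it reuses wipes
an entry when any of its three comparators matches; roughly (a simplified
sketch of tlb_flush_entry(), not part of this patch):

    static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry,
                                       target_ulong addr)
    {
        if (addr == (tlb_entry->addr_read &
                     (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
            addr == (tlb_entry->addr_write &
                     (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
            addr == (tlb_entry->addr_code &
                     (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
            memset(tlb_entry, -1, sizeof(*tlb_entry));
        }
    }
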
tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
start1, length);
}
+
+ for (i = 0; i < CPU_VTLB_SIZE; i++) {
+ tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
+ start1, length);
+ }
}
}
}
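
Dirty-memory bookkeeping must now visit the victim slots too: a translation
parked there still carries a writable addr_write, and if dirty tracking were
re-armed without touching it, a later swap back into the main table would
let stores bypass dirty logging. The helper forces in-range RAM entries back
onto the slow path; approximately (a simplified sketch of the pre-existing
tlb_reset_dirty_range(), assuming addend converts guest to host addresses
as in cputlb.c):

    static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                             uintptr_t start,
                                             uintptr_t length)
    {
        uintptr_t addr;

        if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == 0) { /* RAM */
            addr = (tlb_entry->addr_write & TARGET_PAGE_MASK)
                   + tlb_entry->addend;
            if ((addr - start) < length) {
                tlb_entry->addr_write |= TLB_NOTDIRTY;
            }
        }
    }
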
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
+
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ int k;
+ for (k = 0; k < CPU_VTLB_SIZE; k++) {
+ tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
+ }
+ }
}
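
tlb_set_dirty() is the converse: once the page has been dirtied, the
NOTDIRTY bit must be dropped from every cached copy, victim slots included,
or those entries would keep bouncing stores through the slow path. The
per-entry helper is a one-liner; roughly (sketch of the pre-existing
tlb_set_dirty1()):

    static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                      target_ulong vaddr)
    {
        if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
            tlb_entry->addr_write = vaddr;
        }
    }

The next hunk is in tlb_set_page(), where the eviction itself happens.
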
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
uintptr_t addend;
CPUTLBEntry *te;
hwaddr iotlb, xlat, sz;
+ unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
assert(size >= TARGET_PAGE_SIZE);
if (size != TARGET_PAGE_SIZE) {
prot, &address);
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- env->iotlb[mmu_idx][index] = iotlb - vaddr;
te = &env->tlb_table[mmu_idx][index];
+
+ /* do not discard the translation in te; evict it into the victim tlb */
+ env->tlb_v_table[mmu_idx][vidx] = *te;
+ env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
+
+ /* refill the tlb */
+ env->iotlb[mmu_idx][index] = iotlb - vaddr;
te->addend = addend - vaddr;
if (prot & PAGE_READ) {
te->addr_read = address;
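
Since vtlb_index is a free-running counter, replacement is round-robin
(FIFO): each refill of the main table evicts into the next victim slot, so
the victim table always holds the last CPU_VTLB_SIZE displaced translations.
A tiny standalone demo of the slot sequence (hypothetical driver, not part
of the patch):

    #include <stdio.h>

    #define CPU_VTLB_SIZE 8

    int main(void)
    {
        unsigned vtlb_index = 0;             /* mirrors env->vtlb_index */

        /* Twelve evictions: slots 0..7 fill in order, then 0..3 recycle. */
        for (int i = 0; i < 12; i++) {
            unsigned vidx = vtlb_index++ % CPU_VTLB_SIZE;
            printf("eviction %2d -> victim slot %u\n", i, vidx);
        }
        return 0;
    }

The next hunks are in include/exec/cpu-defs.h, where the victim table
storage is declared next to the main table.
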
#if !defined(CONFIG_USER_ONLY)
#define CPU_TLB_BITS 8
#define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+/* use a fully associative victim tlb of 8 entries */
+#define CPU_VTLB_SIZE 8
#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#define CPU_COMMON_TLB \
/* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
- hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \
+ hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ hwaddr iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; \
target_ulong tlb_flush_addr; \
- target_ulong tlb_flush_mask;
+ target_ulong tlb_flush_mask; \
+ target_ulong vtlb_index; \
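
The trailing backslash after vtlb_index is correct: in the source file a
blank line follows, which terminates the CPU_COMMON_TLB macro. The footprint
of the new arrays is small; a back-of-the-envelope, where the 64-bit
host/target (32-byte CPUTLBEntry) and the three MMU modes are assumptions
for the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_MMU_MODES  3               /* assumption: target-dependent */
    #define CPU_VTLB_SIZE 8

    typedef uint64_t hwaddr;

    /* assumption: 64-bit host and target, so a CPUTLBEntry is three
     * target_ulong comparators plus a uintptr_t addend = 32 bytes,
     * modelled here as four 64-bit fields */
    typedef struct {
        uint64_t addr_read, addr_write, addr_code, addend;
    } Entry;

    int main(void)
    {
        size_t per_vcpu = (size_t)NB_MMU_MODES * CPU_VTLB_SIZE *
                          (sizeof(Entry) + sizeof(hwaddr));
        printf("victim TLB footprint: %zu bytes per vCPU\n", per_vcpu);
        return 0;                                            /* 960 bytes */
    }

The remaining hunks are in softmmu_template.h, the template that generates
the softmmu load/store helpers.
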
#else
# define helper_te_st_name helper_le_st_name
#endif
+/* check the victim tlb for a hit and, if found, swap the entry into the main tlb */
+#define VICTIM_TLB_HIT(ty) \
+({ \
+ /* We are about to do a page table walk. Our last hope is the \
+ * victim tlb: try to refill from it before walking the \
+ * page table. */ \
+ int vidx; \
+ hwaddr tmpiotlb; \
+ CPUTLBEntry tmptlb; \
+ for (vidx = CPU_VTLB_SIZE - 1; vidx >= 0; --vidx) { \
+ if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
+ /* found entry in victim tlb, swap tlb and iotlb */ \
+ tmptlb = env->tlb_table[mmu_idx][index]; \
+ env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
+ env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
+ tmpiotlb = env->iotlb[mmu_idx][index]; \
+ env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
+ env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
+ break; \
+ } \
+ } \
+ /* return true when there is a vtlb hit, i.e. vidx >= 0 */ \
+ vidx >= 0; \
+})
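
VICTIM_TLB_HIT is a GNU C statement expression, and it has to be a macro
because ty names the CPUTLBEntry comparator field (ADDR_READ or addr_write)
pasted into the comparison. Note that a hit swaps rather than copies: the
hot translation is promoted into the direct-mapped slot and the conflicting
entry drops into the victim slot, so neither translation is lost. A
hypothetical non-macro rendering for the read comparator only (assumes
QEMU's CPUArchState/CPUTLBEntry declarations; not part of the patch):

    static inline bool victim_tlb_hit_read(CPUArchState *env, int mmu_idx,
                                           int index, target_ulong addr)
    {
        int vidx;

        for (vidx = CPU_VTLB_SIZE - 1; vidx >= 0; --vidx) {
            CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];

            if (vtlb->addr_read == (addr & TARGET_PAGE_MASK)) {
                /* Swap, don't copy: promote the victim entry and demote
                 * the conflicting main-table entry in its place. */
                CPUTLBEntry tmptlb = env->tlb_table[mmu_idx][index];
                hwaddr tmpiotlb = env->iotlb[mmu_idx][index];

                env->tlb_table[mmu_idx][index] = *vtlb;
                *vtlb = tmptlb;
                env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];
                env->iotlb_v[mmu_idx][vidx] = tmpiotlb;
                return true;
            }
        }
        return false;
    }
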
+
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(ADDR_READ)) {
+ tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+ mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(addr_write)) {
+ tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
}
#endif
- tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ if (!VICTIM_TLB_HIT(addr_write)) {
+ tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+ }
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
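
Taken together, the four helper call sites above give every softmmu access a
three-stage lookup: the direct-mapped main TLB first, then the eight victim
slots, and only then the full tlb_fill() page-table walk. The resulting
slow-path shape, simplified (illustrative; mirrors the hunks above rather
than compiling on its own):

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* main TLB missed; try the victim TLB before walking */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
        }
        /* either path leaves a valid entry in the main slot */
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }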