static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
- unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+ int i, i0 = tb_jmp_cache_hash_page(page_addr);
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
- qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+ qatomic_set(&jc->array[i0 + i].tb, NULL);
}
}
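For orientation, a minimal sketch of the cache layout this hunk assumes
(only CPUJumpCache, TB_JMP_CACHE_SIZE and TB_JMP_PAGE_SIZE come from the
patch; the layout shown is illustrative, not the tree's definition):

    struct CPUJumpCache {
        struct {
            TranslationBlock *tb;   /* the pointer qatomic_set() clears above */
        } array[TB_JMP_CACHE_SIZE];
    };

tb_jmp_cache_hash_page() is assumed to return the first of the
TB_JMP_PAGE_SIZE consecutive slots reserved for one guest page, so the
loop wipes exactly that page's span of the array.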
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
- /* Discard jump cache entries for any tb which might potentially
- overlap the flushed page. */
- tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
- tb_jmp_cache_clear_page(cpu, addr);
-}
-
/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
* @desc: The CPUTLBDesc portion of the TLB
qemu_spin_unlock(&env_tlb(env)->c.lock);
- cpu_tb_jmp_cache_clear(cpu);
+ tcg_flush_jmp_cache(cpu);
if (to_clean == ALL_MMUIDX_BITS) {
qatomic_set(&env_tlb(env)->c.full_flush_count,
            env_tlb(env)->c.full_flush_count + 1);
}
qemu_spin_unlock(&env_tlb(env)->c.lock);
- tb_flush_jmp_cache(cpu, addr);
+ /*
+ * Discard jump cache entries for any tb which might potentially
+ * overlap the flushed page, which includes the previous page.
+ */
+ tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+ tb_jmp_cache_clear_page(cpu, addr);
}
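Why the previous page is cleared as well: a translation block can cross a
page boundary, so a tb hashed under the preceding page may still contain
code from the flushed page. For example (addresses assumed, 4 KiB pages),
a tb translated from guest PC 0x1ff8 is hashed under page 0x1000 but its
code may continue into page 0x2000; flushing 0x2000 must therefore also
drop entries hashed under 0x1000.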
/*
 * If the length is larger than the jump cache size, then it will take
 * longer to clear each entry individually than it will to clear it all.
*/
if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
- cpu_tb_jmp_cache_clear(cpu);
+ tcg_flush_jmp_cache(cpu);
return;
}
- for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
- tb_flush_jmp_cache(cpu, d.addr + i);
+ /*
+ * Discard jump cache entries for any tb which might potentially
+ * overlap the flushed pages, which include the previous page.
+ */
+ d.addr -= TARGET_PAGE_SIZE;
+ for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+ tb_jmp_cache_clear_page(cpu, d.addr);
+ d.addr += TARGET_PAGE_SIZE;
}
}
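A quick check of the loop bound (TARGET_PAGE_SIZE assumed to be 0x1000):
flushing d.addr = 0x4000 with d.len = 0x3000 must drop jump cache entries
for pages 0x3000 through 0x6000; after the d.addr -= TARGET_PAGE_SIZE
adjustment, the loop runs n = 0x3000 / 0x1000 + 1 = 4 times and visits
exactly those four pages.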
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
- cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
+ cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
+ TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
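For example (addresses assumed): before this change, a caller passing a
ram_addr of 0x12345 would clear DIRTY_MEMORY_CODE state for the misaligned
range 0x12345..0x13344; with the mask applied, the whole page at 0x12000
is cleared, which is what the write-protect logic expects.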
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
-/* Add a new TLB entry. At most one entry for a given virtual address
+/*
+ * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is used only by tlb_flush_page.
*
* Called from TCG-generated code, which is under an RCU read-side
* critical section.
*/
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
- hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, target_ulong size)
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
+ target_ulong vaddr, CPUTLBEntryFull *full)
{
CPUArchState *env = cpu->env_ptr;
CPUTLB *tlb = env_tlb(env);
CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page;
target_ulong vaddr_page;
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- int wp_flags;
+ int asidx, wp_flags, prot;
bool is_ram, is_romd;
assert_cpu_is_self(cpu);
- if (size <= TARGET_PAGE_SIZE) {
+ if (full->lg_page_size <= TARGET_PAGE_BITS) {
sz = TARGET_PAGE_SIZE;
} else {
- tlb_add_large_page(env, mmu_idx, vaddr, size);
- sz = size;
+ sz = (hwaddr)1 << full->lg_page_size;
+ tlb_add_large_page(env, mmu_idx, vaddr, sz);
}
vaddr_page = vaddr & TARGET_PAGE_MASK;
- paddr_page = paddr & TARGET_PAGE_MASK;
+ paddr_page = full->phys_addr & TARGET_PAGE_MASK;
+ prot = full->prot;
+ asidx = cpu_asidx_from_attrs(cpu, full->attrs);
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
- &xlat, &sz, attrs, &prot);
+ &xlat, &sz, full->attrs, &prot);
assert(sz >= TARGET_PAGE_SIZE);
tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
" prot=%x idx=%d\n",
- vaddr, paddr, prot, mmu_idx);
+ vaddr, full->phys_addr, prot, mmu_idx);
address = vaddr_page;
- if (size < TARGET_PAGE_SIZE) {
+ if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */
address |= TLB_INVALID_MASK;
}
- if (attrs.byte_swap) {
+ if (full->attrs.byte_swap) {
address |= TLB_BSWAP;
}
* subtract here is that of the page base, and not the same as the
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
+ desc->fulltlb[index] = *full;
desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
- desc->fulltlb[index].attrs = attrs;
+ desc->fulltlb[index].phys_addr = paddr_page;
+ desc->fulltlb[index].prot = prot;
/* Now calculate the new entry */
tn.addend = addend - vaddr_page;
qemu_spin_unlock(&tlb->c.lock);
}
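The copy-then-override pattern above is worth noting: the entire
caller-supplied CPUTLBEntryFull is stored first, then the three fields the
core computes itself (xlat_section, phys_addr, prot) are overwritten, so
any additional fields a target records in the struct survive untouched.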
-/* Add a new TLB entry, but without specifying the memory
- * transaction attributes to be used.
- */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, MemTxAttrs attrs, int prot,
+ int mmu_idx, target_ulong size)
+{
+ CPUTLBEntryFull full = {
+ .phys_addr = paddr,
+ .attrs = attrs,
+ .prot = prot,
+ .lg_page_size = ctz64(size)
+ };
+
+ assert(is_power_of_2(size));
+ tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+}
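A hedged sketch of calling the new interface directly, as a target's
tlb_fill hook might (all values illustrative; cpu, mmu_idx and vaddr are
assumed to be in scope):

    CPUTLBEntryFull full = {
        .phys_addr    = 0x80000000,                 /* assumed address */
        .attrs        = MEMTXATTRS_UNSPECIFIED,
        .prot         = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        .lg_page_size = TARGET_PAGE_BITS            /* one normal page */
    };

    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);

Note how the wrapper above recovers lg_page_size with ctz64(size): the
is_power_of_2() assertion guarantees that the conversion is lossless.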
+
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size)
static int probe_access_internal(CPUArchState *env, target_ulong addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
- void **phost, uintptr_t retaddr)
+ void **phost, CPUTLBEntryFull **pfull,
+ uintptr_t retaddr)
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
+ *pfull = NULL;
return TLB_INVALID_MASK;
}
/* TLB resize via tlb_fill may have moved the entry. */
+ index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
/*
}
flags &= tlb_addr;
+ *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
*phost = NULL;
return flags;
}
-int probe_access_flags(CPUArchState *env, target_ulong addr,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t retaddr)
+int probe_access_full(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, CPUTLBEntryFull **pfull,
+ uintptr_t retaddr)
{
- int flags;
-
- flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
- nonfault, phost, retaddr);
+ int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+ nonfault, phost, pfull, retaddr);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}
return flags;
}
+int probe_access_flags(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr)
+{
+ CPUTLBEntryFull *full;
+
+ return probe_access_full(env, addr, access_type, mmu_idx,
+ nonfault, phost, &full, retaddr);
+}
+
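A hedged sketch of how a target helper might consume probe_access_full
(local names are illustrative; the call matches the signature introduced
above):

    void *host;
    CPUTLBEntryFull *full;
    int flags = probe_access_full(env, addr, MMU_DATA_LOAD,
                                  cpu_mmu_index(env, false),
                                  true /* nonfault */, &host, &full, 0);

    if (!(flags & TLB_INVALID_MASK)) {
        /* full->phys_addr, full->attrs etc. describe the mapped page. */
    }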
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
+ CPUTLBEntryFull *full;
void *host;
int flags;
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- false, &host, retaddr);
+ false, &host, &full, retaddr);
/* Per the interface, size == 0 merely faults the access. */
if (size == 0) {
    return NULL;
}
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
/* Handle watchpoints. */
if (flags & TLB_WATCHPOINT) {
int wp_access = (access_type == MMU_DATA_STORE
                 ? BP_MEM_WRITE : BP_MEM_READ);
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
+ CPUTLBEntryFull *full;
void *host;
int flags;
flags = probe_access_internal(env, addr, 0, access_type,
- mmu_idx, true, &host, 0);
+ mmu_idx, true, &host, &full, 0);
/* No combination of flags is expected by the caller. */
return flags ? NULL : host;
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
+ CPUTLBEntryFull *full;
void *p;
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
- cpu_mmu_index(env, true), false, &p, 0);
+ cpu_mmu_index(env, true), false, &p, &full, 0);
if (p == NULL) {
return -1;
}