X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=softmmu_template.h;h=4a2b6653f64e7e7689368d76624d9f3611de5520;hb=6d54af0ea9eeee70b4c0eb48bd2ae1d22b207dd4;hp=4d378ca6300cce2c6bfcd2dc1bc82c05de1a8276;hpb=91d35509903464c7f4b9ed56be223d7370d3597c;p=mirror_qemu.git

diff --git a/softmmu_template.h b/softmmu_template.h
index 4d378ca630..4a2b6653f6 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -21,12 +21,6 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
-#include "qemu/timer.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-
-#define DATA_SIZE (1 << SHIFT)
-
 #if DATA_SIZE == 8
 #define SUFFIX q
 #define LSUFFIX q
@@ -84,14 +78,6 @@
 # define BSWAP(X) (X)
 #endif
 
-#ifdef TARGET_WORDS_BIGENDIAN
-# define TGT_BE(X) (X)
-# define TGT_LE(X) BSWAP(X)
-#else
-# define TGT_BE(X) BSWAP(X)
-# define TGT_LE(X) (X)
-#endif
-
 #if DATA_SIZE == 1
 # define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
 # define helper_be_ld_name helper_le_ld_name
@@ -108,60 +94,14 @@
 # define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
 #endif
 
-#ifdef TARGET_WORDS_BIGENDIAN
-# define helper_te_ld_name helper_be_ld_name
-# define helper_te_st_name helper_be_st_name
-#else
-# define helper_te_ld_name helper_le_ld_name
-# define helper_te_st_name helper_le_st_name
-#endif
-
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty) \
-({ \
-    /* we are about to do a page table walk. our last hope is the \
-     * victim tlb. try to refill from the victim tlb before walking the \
-     * page table. */ \
-    int vidx; \
-    CPUIOTLBEntry tmpiotlb; \
-    CPUTLBEntry tmptlb; \
-    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
-        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
-            /* found entry in victim tlb, swap tlb and iotlb */ \
-            tmptlb = env->tlb_table[mmu_idx][index]; \
-            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
-            env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
-            tmpiotlb = env->iotlb[mmu_idx][index]; \
-            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
-            env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
-            break; \
-        } \
-    } \
-    /* return true when there is a vtlb hit, i.e. vidx >=0 */ \
-    vidx >= 0; \
-})
-
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
-                                              CPUIOTLBEntry *iotlbentry,
+                                              size_t mmu_idx, size_t index,
                                               target_ulong addr,
                                               uintptr_t retaddr)
 {
-    uint64_t val;
-    CPUState *cpu = ENV_GET_CPU(env);
-    hwaddr physaddr = iotlbentry->addr;
-    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
-
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-
-    cpu->mem_io_vaddr = addr;
-    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
-                                iotlbentry->attrs);
-    return val;
+    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+    return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
 }
 #endif
 
@@ -171,14 +111,11 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
-    int a_bits = get_alignment_bits(get_memop(oi));
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
-    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+    if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
@@ -186,7 +123,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -195,15 +132,13 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        iotlbentry = &env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
-        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
+        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
         res = TGT_LE(res);
         return res;
     }
@@ -218,10 +153,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     do_unaligned_access:
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion.  */
-        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
-        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Little-endian combine.  */
@@ -245,14 +178,11 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
-    int a_bits = get_alignment_bits(get_memop(oi));
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
-    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+    if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
@@ -260,7 +190,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -269,15 +199,13 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        iotlbentry = &env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
-        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
+        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
         res = TGT_BE(res);
         return res;
     }
@@ -292,10 +220,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     do_unaligned_access:
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion.  */
-        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
-        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
+        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Big-endian combine.  */
@@ -330,24 +256,13 @@ WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
 #endif
 
 static inline void glue(io_write, SUFFIX)(CPUArchState *env,
-                                          CPUIOTLBEntry *iotlbentry,
+                                          size_t mmu_idx, size_t index,
                                           DATA_TYPE val,
                                           target_ulong addr,
                                           uintptr_t retaddr)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
-    hwaddr physaddr = iotlbentry->addr;
-    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
-
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-
-    cpu->mem_io_vaddr = addr;
-    cpu->mem_io_pc = retaddr;
-    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
-                                 iotlbentry->attrs);
+    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+    return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
 }
 
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
@@ -356,13 +271,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    int a_bits = get_alignment_bits(get_memop(oi));
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
-    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+    if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
@@ -370,7 +282,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -378,16 +290,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        iotlbentry = &env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
         val = TGT_LE(val);
-        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
+        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
         return;
     }
 
@@ -395,18 +305,29 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                      >= TARGET_PAGE_SIZE)) {
-        int i;
+        int i, index2;
+        target_ulong page2, tlb_addr2;
     do_unaligned_access:
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache.  */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
+        /* XXX: not efficient, but simple.  */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code in Windows 64-bit.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
             /* Little-endian extract.  */
             uint8_t val8 = val >> (i * 8);
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
         }
         return;
     }
@@ -426,13 +347,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-    int a_bits = get_alignment_bits(get_memop(oi));
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
-
-    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
+    if (addr & ((1 << a_bits) - 1)) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
@@ -440,7 +358,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -448,16 +366,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        iotlbentry = &env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
         val = TGT_BE(val);
-        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
+        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
         return;
     }
 
@@ -465,18 +381,29 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                      >= TARGET_PAGE_SIZE)) {
-        int i;
+        int i, index2;
+        target_ulong page2, tlb_addr2;
    do_unaligned_access:
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
         /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache.  */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
             /* Big-endian extract.  */
             uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
         }
         return;
     }
@@ -485,33 +412,9 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
 #endif /* DATA_SIZE > 1 */
-
-#if DATA_SIZE == 1
-/* Probe for whether the specified guest write access is permitted.
- * If it is not permitted then an exception will be taken in the same
- * way as if this were a real write access (and we will not return).
- * Otherwise the function will return, and there will be a valid
- * entry in the TLB for this access.
- */
-void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
-                 uintptr_t retaddr)
-{
-    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
-
-    if ((addr & TARGET_PAGE_MASK)
-        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-        /* TLB entry is for a different page */
-        if (!VICTIM_TLB_HIT(addr_write)) {
-            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
-        }
-    }
-}
-#endif
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
 
 #undef READ_ACCESS_TYPE
-#undef SHIFT
 #undef DATA_TYPE
 #undef SUFFIX
 #undef LSUFFIX
@@ -522,15 +425,9 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
 #undef USUFFIX
 #undef SSUFFIX
 #undef BSWAP
-#undef TGT_BE
-#undef TGT_LE
-#undef CPU_BE
-#undef CPU_LE
 #undef helper_le_ld_name
 #undef helper_be_ld_name
 #undef helper_le_lds_name
 #undef helper_be_lds_name
 #undef helper_le_st_name
 #undef helper_be_st_name
-#undef helper_te_ld_name
-#undef helper_te_st_name