X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=softmmu_template.h;h=4a2b6653f64e7e7689368d76624d9f3611de5520;hb=6d54af0ea9eeee70b4c0eb48bd2ae1d22b207dd4;hp=6b4e615dbf284c6536f0b0d46b5e8cff25552e6b;hpb=776346cd63e5a1ceeeb4d81fa111d911abb73a69;p=mirror_qemu.git

diff --git a/softmmu_template.h b/softmmu_template.h
index 6b4e615dbf..4a2b6653f6 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -21,12 +21,6 @@
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
-#include "qemu/timer.h"
-#include "exec/address-spaces.h"
-#include "exec/memory.h"
-
-#define DATA_SIZE (1 << SHIFT)
-
 #if DATA_SIZE == 8
 #define SUFFIX q
 #define LSUFFIX q
@@ -84,14 +78,6 @@
 # define BSWAP(X)  (X)
 #endif
 
-#ifdef TARGET_WORDS_BIGENDIAN
-# define TGT_BE(X)  (X)
-# define TGT_LE(X)  BSWAP(X)
-#else
-# define TGT_BE(X)  BSWAP(X)
-# define TGT_LE(X)  (X)
-#endif
-
 #if DATA_SIZE == 1
 # define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
 # define helper_be_ld_name  helper_le_ld_name
@@ -108,85 +94,36 @@
 # define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
 #endif
 
-#ifdef TARGET_WORDS_BIGENDIAN
-# define helper_te_ld_name  helper_be_ld_name
-# define helper_te_st_name  helper_be_st_name
-#else
-# define helper_te_ld_name  helper_le_ld_name
-# define helper_te_st_name  helper_le_st_name
-#endif
-
-/* macro to check the victim tlb */
-#define VICTIM_TLB_HIT(ty)                                                    \
-({                                                                            \
-    /* we are about to do a page table walk. our last hope is the             \
-     * victim tlb. try to refill from the victim tlb before walking the      \
-     * page table. */                                                        \
-    int vidx;                                                                 \
-    hwaddr tmpiotlb;                                                          \
-    CPUTLBEntry tmptlb;                                                       \
-    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
-        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
-            /* found entry in victim tlb, swap tlb and iotlb */               \
-            tmptlb = env->tlb_table[mmu_idx][index];                          \
-            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
-            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
-            tmpiotlb = env->iotlb[mmu_idx][index];                            \
-            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
-            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
-            break;                                                            \
-        }                                                                     \
-    }                                                                         \
-    /* return true when there is a vtlb hit, i.e. vidx >=0 */                 \
-    vidx >= 0;                                                                \
-})
-
 #ifndef SOFTMMU_CODE_ACCESS
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
-                                              hwaddr physaddr,
+                                              size_t mmu_idx, size_t index,
                                               target_ulong addr,
                                               uintptr_t retaddr)
 {
-    uint64_t val;
-    CPUState *cpu = ENV_GET_CPU(env);
-    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
-
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    cpu->mem_io_pc = retaddr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-
-    cpu->mem_io_vaddr = addr;
-    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
-    return val;
+    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+    return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
 }
 #endif
 
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
-WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
-                            uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
 
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
-        }
-#endif
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -195,15 +132,13 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
-        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
         res = TGT_LE(res);
         return res;
     }
@@ -216,16 +151,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
-#endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion.  */
-        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Little-endian combine.  */
@@ -233,14 +162,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         return res;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
-    }
-#endif
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
     res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
@@ -251,30 +172,25 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 }
 
 #if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
-                            uintptr_t retaddr)
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
     DATA_TYPE res;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                             mmu_idx, retaddr);
+    }
 
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
-        }
-#endif
-        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
         }
@@ -283,15 +199,13 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
-        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
         res = TGT_BE(res);
         return res;
     }
@@ -304,16 +218,10 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
-#endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
-        /* Note the adjustment at the beginning of the function.
-           Undo that for the recursion.  */
-        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
+        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Big-endian combine.  */
@@ -321,86 +229,60 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         return res;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
-    }
-#endif
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
     return res;
 }
 #endif /* DATA_SIZE > 1 */
 
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         int mmu_idx)
-{
-    return helper_te_ld_name (env, addr, mmu_idx, GETRA());
-}
-
 #ifndef SOFTMMU_CODE_ACCESS
 
 /* Provide signed versions of the load routines as well.  We can of course
    avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
 #if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
 WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
-                             int mmu_idx, uintptr_t retaddr)
+                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
+    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
 }
 
 # if DATA_SIZE > 1
 WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
-                             int mmu_idx, uintptr_t retaddr)
+                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
+    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
 }
 # endif
 #endif
 
 static inline void glue(io_write, SUFFIX)(CPUArchState *env,
-                                          hwaddr physaddr,
+                                          size_t mmu_idx, size_t index,
                                           DATA_TYPE val,
                                           target_ulong addr,
                                           uintptr_t retaddr)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
-    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
-
-    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
-    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-
-    cpu->mem_io_vaddr = addr;
-    cpu->mem_io_pc = retaddr;
-    io_mem_write(mr, physaddr, val, 1 << SHIFT);
+    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
+    return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
 }
 
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
-                       int mmu_idx, uintptr_t retaddr)
+                       TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
     uintptr_t haddr;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
 
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
-#endif
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -408,16 +290,14 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
         if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
         val = TGT_LE(val);
-        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
         return;
     }
 
@@ -425,34 +305,33 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                      >= TARGET_PAGE_SIZE)) {
-        int i;
+        int i, index2;
+        target_ulong page2, tlb_addr2;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-#endif
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache.  */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
+        /* XXX: not efficient, but simple.  */
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code in Windows 64-bit.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
             /* Little-endian extract.  */
             uint8_t val8 = val >> (i * 8);
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            mmu_idx, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
         }
         return;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-#endif
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
     glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
@@ -463,25 +342,23 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
 #if DATA_SIZE > 1
 void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
-                       int mmu_idx, uintptr_t retaddr)
+                       TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
 
-    /* Adjust the given return address.  */
-    retaddr -= GETPC_ADJ;
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
 
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
-#endif
-        if (!VICTIM_TLB_HIT(addr_write)) {
+        if (!VICTIM_TLB_HIT(addr_write, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
         tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -489,16 +366,14 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        hwaddr ioaddr;
         if ((addr & (DATA_SIZE - 1)) != 0) {
             goto do_unaligned_access;
         }
-        ioaddr = env->iotlb[mmu_idx][index];
 
         /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
         val = TGT_BE(val);
-        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
         return;
     }
 
@@ -506,50 +381,40 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                      >= TARGET_PAGE_SIZE)) {
-        int i;
+        int i, index2;
+        target_ulong page2, tlb_addr2;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-#endif
+        /* Ensure the second page is in the TLB.  Note that the first page
+           is already guaranteed to be filled, and that the second page
+           cannot evict the first.  */
+        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
+        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
+        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
+            && !VICTIM_TLB_HIT(addr_write, page2)) {
+            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
+                     mmu_idx, retaddr);
+        }
+
         /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache.  */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* This loop must go in the forward direction to avoid issues
+           with self-modifying code.  */
+        for (i = 0; i < DATA_SIZE; ++i) {
             /* Big-endian extract.  */
             uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            mmu_idx, retaddr + GETPC_ADJ);
+                                            oi, retaddr);
        }
         return;
     }
 
-    /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-#endif
-
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
 }
 #endif /* DATA_SIZE > 1 */
-
-void
-glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         DATA_TYPE val, int mmu_idx)
-{
-    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
-}
-
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
 
 #undef READ_ACCESS_TYPE
-#undef SHIFT
 #undef DATA_TYPE
 #undef SUFFIX
 #undef LSUFFIX
@@ -560,15 +425,9 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
 #undef USUFFIX
 #undef SSUFFIX
 #undef BSWAP
-#undef TGT_BE
-#undef TGT_LE
-#undef CPU_BE
-#undef CPU_LE
 #undef helper_le_ld_name
 #undef helper_be_ld_name
 #undef helper_le_lds_name
 #undef helper_be_lds_name
 #undef helper_le_st_name
 #undef helper_be_st_name
-#undef helper_te_ld_name
-#undef helper_te_st_name