/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#else
#error unsupported data size
#endif
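
/* For illustration: including this template with SHIFT == 2 picks the
   32-bit ("l") variants above, i.e. SUFFIX == l, LSUFFIX == l and
   SDATA_TYPE == int32_t; the other sizes follow the same pattern.  */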

#define DATA_TYPE   glue(u, SDATA_TYPE)

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
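
/* For illustration: a 32-bit load helper on a 64-bit host therefore returns
   tcg_target_ulong (the value zero- or sign-extended to the full host
   register), so generated code never depends on ABI-specific return type
   promotion rules.  */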

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
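
/* For illustration (assuming the usual data-access case where the including
   file defines MMUSUFFIX as _mmu; code access uses a different suffix): with
   SHIFT == 2 the names above resolve to e.g.
       helper_le_ld_name -> helper_le_ldul_mmu
       helper_be_ld_name -> helper_be_ldul_mmu
       helper_le_st_name -> helper_le_stl_mmu
   which are the entry points the TCG backends call for guest memory ops.  */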

static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;
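    /* (The incoming retaddr points just past the call in the generated host
       code; backing it up by GETPC_ADJ keeps it inside the calling insn so
       that faults taken below resolve to the correct guest instruction.)  */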

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
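        /* (Illustration: for a 4-byte load with addr & 3 == 3, shift is 24,
           so the low byte of the result comes from the top byte of res1 and
           the remaining three bytes from the bottom of res2.)  */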
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

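    /* TLB hit: addend is the cached host-minus-guest address delta for this
       RAM page, so haddr is a plain host pointer to the data.  */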
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
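        /* (Illustration: for a 4-byte load with addr & 3 == 3, shift is 24,
           so the low byte of res1 becomes the most significant byte of the
           result and the top three bytes of res2 fill in the rest.)  */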
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

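/* The unsuffixed (target-endian) entry point is kept as a thin wrapper: it
   captures the host return address with GETRA() and forwards to the
   endian-specific helper above.  */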
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return helper_te_ld_name(env, addr, mmu_idx, GETRA());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
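        /* Split the access into individual byte stores through the byte
           helper; each byte re-checks the TLB, so the part that lands on
           the second page is handled there.  */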
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
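            /* (i == DATA_SIZE - 1 extracts the least significant byte, which
               big-endian order places at the highest address.)  */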
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name