/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
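/*
 * Example (illustrative, not from the original file): on a 64-bit host, a
 * 2-byte load helper returns tcg_target_ulong rather than uint16_t, so the
 * result arrives zero-extended in a full host register and the TCG backend
 * does not depend on the C ABI's return-value promotion rules.
 */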
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
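/*
 * Example (illustrative): with DATA_SIZE == 4 on a big-endian target,
 * TGT_LE(0x11223344) is bswap32(0x11223344) == 0x44332211 and TGT_BE is
 * the identity; on a little-endian target the two roles swap.
 */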
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
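/*
 * Example expansion (illustrative, assuming MMUSUFFIX is _mmu, as defined
 * by cputlb.c): with DATA_SIZE == 4 on a 64-bit host, helper_le_ld_name
 * expands to helper_le_ldul_mmu and helper_le_lds_name to
 * helper_le_ldsl_mmu.  Byte accesses have no endianness, so for
 * DATA_SIZE == 1 the le and be names collapse to one helper_ret_ldub_mmu.
 */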
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, DATA_SIZE,
                                iotlbentry->attrs);
    return val;
}
#endif
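/*
 * Note (illustrative, not from the original file): io_read/io_write are
 * the MMIO slow path.  If the CPU is not currently allowed to perform I/O
 * (e.g. in icount mode, where an I/O access may only happen as the last
 * operation of a translation block), cpu_io_recompile() retranslates the
 * current block so that the access can be retried in a legal position.
 */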
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine. */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
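/*
 * Worked example (illustrative) of the little-endian combine above: for
 * DATA_SIZE == 4 and an access with addr & 3 == 1, shift is 8; res1 is
 * the aligned word holding the first three wanted bytes and res2 the
 * following word holding the fourth, so (res1 >> 8) | (res2 << 24)
 * reassembles the unaligned 32-bit value in little-endian order.
 */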
#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine. */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
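/*
 * Worked example (illustrative) of the big-endian combine above: for
 * DATA_SIZE == 4 and addr & 3 == 1, shift is 8; the wanted value starts
 * one byte into res1, so (res1 << 8) | (res2 >> 24) shifts the three
 * low-order bytes of res1 up and fills the final byte from the top of
 * res2.
 */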
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
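/*
 * Example (illustrative): the cast performs the sign extension.  For a
 * 16-bit load returning 0x8000 on a 64-bit host, (int16_t)0x8000 is
 * -32768, and widening that back to tcg_target_ulong for the WORD_TYPE
 * return value yields 0xffffffffffff8000.
 */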
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, DATA_SIZE,
                                 iotlbentry->attrs);
}
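/*
 * Note (illustrative, not from the original file): mem_io_vaddr and
 * mem_io_pc record the guest virtual address and host return address of
 * the access; code reached from the dispatch above (for instance
 * watchpoint checks, or the notdirty write path that invalidates TBs for
 * self-modifying code) may use them to reconstruct the CPU context.
 */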
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first. */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple. */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit. */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract. */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
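/*
 * Worked example (illustrative): storing 0x11223344 with DATA_SIZE == 4,
 * the loop above issues the byte stores 0x44, 0x33, 0x22, 0x11 to
 * addr+0..3, i.e. the value's little-endian byte order, one byte at a
 * time so that each store takes the full (possibly IO) byte-store path.
 */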
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first. */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code. */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
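/*
 * Worked example (illustrative): storing 0x11223344 with DATA_SIZE == 4,
 * the shift (((4 - 1) * 8) - i * 8) selects bytes from the most
 * significant downward, so the byte stores are 0x11, 0x22, 0x33, 0x44 to
 * addr+0..3, i.e. the value's big-endian byte order.
 */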
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE

#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name