/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
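
/* Illustrative example (not part of the original source): for a 2-byte
   access on a 64-bit host, DATA_TYPE is uint16_t but WORD_TYPE is
   tcg_target_ulong, so the load helpers hand TCG a value already
   extended to the full host register width and no ABI-specific
   promotion rules come into play.  */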
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
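
/* Example expansion (illustrative, assuming the includer defines
   MMUSUFFIX as _mmu): with DATA_SIZE == 4, SUFFIX is l and USUFFIX is
   ul, so helper_le_ld_name becomes helper_le_ldul_mmu and
   helper_be_st_name becomes helper_be_stl_mmu — the symbols the TCG
   backends actually emit calls to.  */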
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, DATA_SIZE,
                                iotlbentry->attrs);
    return val;
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
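    /* Illustrative (assumed values, not from the original source): with
       TARGET_PAGE_BITS == 12 (4 KiB pages) and CPU_TLB_SIZE == 256,
       addr == 0x12345678 selects TLB slot (0x12345678 >> 12) & 0xff == 0x45.  */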
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
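        /* Worked example (illustrative, not in the original): with
           shift == 16 for a 4-byte BE load, res1 == 0xAABBCCDD and
           res2 == 0x11223344 combine to
           (res1 << 16) | (res2 >> 16) == 0xCCDD1122, the big-endian
           value of the four bytes starting at addr.  */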
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
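
/* Illustrative note (not in the original): casting through SDATA_TYPE and
   returning WORD_TYPE sign-extends the loaded value, e.g. a 2-byte load of
   0x8000 via the signed helper comes back as 0xffffffffffff8000 on a
   64-bit host.  */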

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, DATA_SIZE,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
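        /* Worked example (illustrative, not in the original): storing the
           4-byte value 0x44332211 this way writes 0x11 at addr, 0x22 at
           addr + 1, 0x33 at addr + 2 and 0x44 at addr + 3, regardless of
           how the two pages are mapped.  */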
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
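        /* Worked example (illustrative, not in the original): storing the
           4-byte value 0x11223344 this way writes 0x11 at addr, 0x22 at
           addr + 1, 0x33 at addr + 2 and 0x44 at addr + 3, i.e. big-endian
           byte order.  */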
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name