/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
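/* A minimal sketch of how this template is typically instantiated (the
   MMUSUFFIX value and call site are assumptions for illustration): a user
   such as cputlb.c defines SHIFT once per access size and re-includes this
   header, e.g.

       #define MMUSUFFIX _mmu

       #define SHIFT 0
       #include "exec/softmmu_template.h"

       #define SHIFT 1
       #include "exec/softmmu_template.h"

       #define SHIFT 2
       #include "exec/softmmu_template.h"

       #define SHIFT 3
       #include "exec/softmmu_template.h"
*/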
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
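/* Concretely (illustration, assuming a 64-bit host): a 2-byte load helper
   then returns tcg_target_ulong, so the 16-bit result comes back
   zero-extended in a full host register and generated code needs no extra
   extension step.  */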
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif
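/* For example (illustrative): on a big-endian target with DATA_SIZE == 2,
   TGT_LE(0x1122) expands to bswap16(0x1122) == 0x2211 while TGT_BE is a
   no-op; on a little-endian target the two roles are reversed.  */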
#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
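/* For instance (assuming MMUSUFFIX == _mmu, DATA_SIZE == 4 and a 64-bit
   host), the macros above name helper_le_ldul_mmu, helper_be_ldul_mmu,
   helper_le_ldsl_mmu, helper_be_ldsl_mmu, helper_le_stl_mmu and
   helper_be_stl_mmu.  Byte accesses have no endianness, hence the single
   helper_ret_* set when DATA_SIZE == 1.  */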
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
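        /* Worked example (illustrative): for a 4-byte load at an address
           with addr % 4 == 1, shift == 8; res1 >> 8 contributes the top
           three bytes of the lower word as result bytes 0-2, and
           res2 << 24 contributes the lowest byte of the upper word as
           result byte 3.  */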
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
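        /* Mirror image of the little-endian case (illustrative): for a
           4-byte load at addr % 4 == 1, res1 << 8 supplies the high three
           bytes of the result and res2 >> 24 supplies the low byte.  */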
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return helper_te_ld_name(env, addr, mmu_idx, GETRA());
}
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
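/* The (SDATA_TYPE) cast followed by implicit widening back to WORD_TYPE is
   what performs the sign extension; e.g. (illustrative) a 2-byte load that
   returns 0x8000 becomes int16_t -32768 and widens to a sign-extended
   tcg_target_ulong.  */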
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
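            /* Worked example (illustrative): a 4-byte store of 0x11223344
               issues byte stores 0x44 at addr, 0x33 at addr + 1, 0x22 at
               addr + 2 and 0x11 at addr + 3; i counts down, so the highest
               address is written first.  */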
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name