/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#else
#error unsupported data size
#endif

#define DATA_TYPE   glue(u, SDATA_TYPE)
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
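
/* A concrete illustration (assuming the including file defines SHIFT as 2
 * and MMUSUFFIX as _mmu, as the softmmu users do; neither value appears in
 * this file itself): the glue() macros below expand the unsigned load
 * helper to roughly
 *
 *     tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env,
 *                                          target_ulong addr, int mmu_idx,
 *                                          uintptr_t retaddr);
 *
 * i.e. a 32-bit load whose result is zero-extended to the host register
 * width, per the WORD_TYPE comment above.  */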
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}
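
/* Background on the check above (summarised from the callees, not spelled
 * out in this file): can_do_io() is only true when the CPU is executing
 * the last instruction of a translation block, since device I/O may have
 * side effects the rest of the TB has not accounted for.  When it is
 * false, cpu_io_recompile() retranslates the block so that the access
 * becomes the final instruction.  Accesses through io_mem_rom and
 * io_mem_notdirty are exempt because they are RAM-backed and exist only
 * for write tracking, not real device I/O.  */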
/* handle all cases except unaligned accesses which span two pages */
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
WORD_TYPE
glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, int mmu_idx,
                                              uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;
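
    /* Why the adjustment (summarising GETPC_ADJ's definition elsewhere in
     * QEMU): on some hosts the raw return address points past the call
     * instruction, which may already belong to the next guest insn's
     * translation.  Subtracting GETPC_ADJ biases retaddr back into the
     * calling instruction so a fault here is attributed to the correct
     * guest PC.  The recursive calls below pass retaddr + GETPC_ADJ
     * precisely so the callee's own subtraction undoes it.  */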
    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
    }
    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2, res;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
            (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
            (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        return res;
    }
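
    /* Worked example (illustrative, not from the original source): a
     * 4-byte little-endian load at addr with addr & 3 == 2, crossing a
     * page boundary.  Then addr1 = addr - 2, addr2 = addr1 + 4 and
     * shift = 16.  res1 holds the bytes at addr1..addr1+3 and res2 those
     * at addr2..addr2+3, so (res1 >> 16) keeps the two bytes at addr and
     * addr+1 in the low half, while (res2 << 16) places the bytes at
     * addr+2 and addr+3 in the high half.  shift is never 0 here, since
     * an aligned access cannot span two pages while DATA_SIZE is no
     * larger than the page size.  */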
    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    /* Note that ldl_raw is defined with type "int".  */
    return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
}
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
                                                         GETRA());
}
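
/* Relationship between the two entry points (explanatory note): the
 * helper_ret_* variant above receives an explicit host return address
 * from TCG-generated code, while this legacy helper_* variant derives it
 * itself via GETRA(), assuming it was called directly from translated
 * code.  Both funnel into the same slow-path logic.  */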
#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE
glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, int mmu_idx,
                                              uintptr_t retaddr)
{
    return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
        (env, addr, mmu_idx, retaddr);
}
#endif
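
/* How the sign extension works (illustrative): the unsigned helper
 * returns the value zero-extended in a WORD_TYPE; the (SDATA_TYPE) cast
 * truncates it to the data size and reinterprets it as signed, and the
 * implicit conversion back to WORD_TYPE then sign-extends.  For example,
 * with 16-bit data on a 64-bit host, a loaded 0x8000 comes back from
 * this helper as 0xffffffffffff8000.  */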
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}
void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, DATA_TYPE val,
                                             int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;
    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }
    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }
    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
            uint8_t val8 = val >> (i * 8);
#endif
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }
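
    /* Worked example (illustrative, not from the original source):
     * storing the 4-byte value 0xAABBCCDD byte by byte.  On a
     * little-endian target the loop stores val >> 24 (0xAA) at addr + 3
     * first and val >> 0 (0xDD) at addr last; on a big-endian target the
     * shift is reversed, so 0xDD lands at addr + 3 and 0xAA at addr.
     * Each byte store goes back through the full helper, which is what
     * lets the two pages be refilled and checked independently.  */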
    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
}
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
                                                 GETRA());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE