/*
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX ul
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
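
/* Illustrative note (added commentary, not in the original source): this
 * header is a template and is included once per access size with SHIFT
 * defined beforehand.  For example, assuming SHIFT == 2 and MMUSUFFIX ==
 * _mmu, the glue() expressions below expand roughly as:
 *
 *   DATA_SIZE == 4, DATA_TYPE == uint32_t
 *   glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX) -> helper_ret_ldl_mmu
 *   glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX) -> helper_ret_stl_mmu
 *
 * The exact names depend on how SUFFIX and MMUSUFFIX are defined by the
 * including file.
 */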

static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

/* handle all cases except unaligned access which span two pages */
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
DATA_TYPE
glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, int mmu_idx,
                                             uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
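
    /* Note (added commentary, not in the original source): including
     * TLB_INVALID_MASK in the comparison mask means an entry whose invalid
     * bit is set can never compare equal to a page-aligned address, so an
     * invalidated entry takes the tlb_fill() path above just like a plain
     * page-tag mismatch would.
     */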

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2, res;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                            mmu_idx, retaddr);
        res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                            mmu_idx, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        return res;
    }
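
    /* Worked example for the split-access arithmetic above (added
     * commentary, not in the original source), assuming DATA_SIZE == 4, a
     * little-endian target, and an access at page offset 0xffd on a 4 KiB
     * page: the access covers bytes 0xffd-0xfff of one page and byte 0x000
     * of the next.  Then addr1 = addr & ~3 (offset 0xffc), addr2 = addr1 + 4
     * (offset 0x000 of the next page), shift = (0xffd & 3) * 8 = 8, and the
     * result is assembled as (res1 >> 8) | (res2 << 24).
     */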

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
}

DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
                                                        GETRA());
}
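
/* Added commentary (not in the original source): the helper_ld/helper_st
 * entry points are thin wrappers around the _ret_ variants; they only differ
 * in how the host return address is obtained (assumed here to be GETRA()),
 * which the slow path uses to reconstruct guest CPU state for the faulting
 * memory operation.
 */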

#ifndef SOFTMMU_CODE_ACCESS

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, DATA_TYPE val,
                                             int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
            uint8_t val8 = val >> (i * 8);
#endif
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr);
        }
        return;
    }
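
    /* Worked example for the byte loop above (added commentary, not in the
     * original source), assuming DATA_SIZE == 4: on a little-endian target,
     * iteration i stores val >> (i * 8), i.e. byte i of the value at
     * addr + i; on a big-endian target it stores val >> (24 - i * 8), so the
     * most significant byte lands at the lowest address.  Iterating from
     * DATA_SIZE - 1 down to 0 touches the highest (possibly page-crossing)
     * byte first.
     */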

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
}

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
                                                 GETRA());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
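
/* Added for completeness (not captured in the excerpt above): the full
 * template also undefines its remaining parameters here so that it can be
 * re-included with a different SHIFT.  A minimal sketch of that epilogue and
 * of the usual instantiation pattern (names assumed, for illustration only):
 *
 *   #undef SHIFT
 *   #undef DATA_TYPE
 *   #undef SUFFIX
 *   #undef USUFFIX
 *   #undef DATA_SIZE
 *   #undef ADDR_READ
 *
 * and, in the including file:
 *
 *   #define SHIFT 0
 *   #include "exec/softmmu_template.h"
 *   #define SHIFT 1
 *   #include "exec/softmmu_template.h"
 */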