include/exec/softmmu_template.h (mirror_qemu.git, git.proxmox.com mirror)
Commit: "tcg-i386: Don't perform GETPC adjustment in TCG code"

/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

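/*
 * Illustrative note (added; not part of the original file): the including
 * file is expected to define SHIFT before pulling this template in, and the
 * helper names additionally rely on MMUSUFFIX.  Assuming SHIFT == 2 and
 * MMUSUFFIX defined as _mmu by the includer, the block above selects
 *
 *     DATA_SIZE  -> 4
 *     SUFFIX     -> l
 *     USUFFIX    -> l
 *     DATA_TYPE  -> uint32_t
 *
 * so that glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX) below expands to
 * helper_ret_ldl_mmu, the 32-bit guest load helper.
 */
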
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

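/*
 * Added notes (not in the original file):
 *
 * READ_ACCESS_TYPE is the access_type value passed to tlb_fill() and
 * do_unaligned_access(); 2 marks an instruction fetch (SOFTMMU_CODE_ACCESS),
 * 0 a data load, and the store helpers below pass 1.  ADDR_READ selects the
 * TLB entry field the lookup compares against: addr_code for code fetches,
 * addr_read for data loads.
 *
 * The io_read helper that follows (and io_write further down) forwards an
 * MMIO access to the MemoryRegion named by the iotlb entry.  When I/O is not
 * currently allowed (icount mode, !can_do_io()), cpu_io_recompile() rebuilds
 * the current translation block so the I/O instruction terminates it and the
 * instruction count stays exact.
 */
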
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

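/*
 * Added overview (not in the original file): the load and store helpers
 * below share the same shape: index the per-mmu_idx TLB with the virtual
 * page number, refill it via tlb_fill() on a mismatch, divert to the I/O
 * path when the TLB entry carries flag bits outside TARGET_PAGE_MASK, split
 * an access that crosses a page boundary into two recursive loads or
 * per-byte stores, and otherwise access host memory directly at
 * addr + tlb_table[mmu_idx][index].addend.
 */
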
/* handle all cases except unaligned accesses that span two pages */
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
DATA_TYPE
glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, int mmu_idx,
                                             uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2, res;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)
            (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)
            (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
}

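/*
 * Worked example (added for illustration; not in the original file):
 * assuming DATA_SIZE == 4, a little-endian target and (addr & 3) == 2, the
 * page-crossing path above loads the aligned words at addr1 = addr & ~3 and
 * addr2 = addr1 + 4, computes shift = 2 * 8 = 16, and reassembles
 *
 *     res = (res1 >> 16) | (res2 << 16);
 *
 * so the upper half of the first word becomes the low bytes of the result
 * and the lower half of the second word becomes the high bytes.
 */
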
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
                                                        GETRA_EXT());
}

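/*
 * Added note (not in the original file): helper_ld/helper_st are the entry
 * points that take no explicit return address; GETRA_EXT() recovers the host
 * return address of the call so the _ret_ variants can still attribute a TLB
 * fill or fault to the right translated code.  The "retaddr -= GETPC_ADJ" at
 * the top of those variants compensates for return addresses pointing past
 * the call instruction; per the commit this copy was taken at ("tcg-i386:
 * Don't perform GETPC adjustment in TCG code"), that adjustment is made here
 * in the helpers rather than in the generated code.
 */
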
#ifndef SOFTMMU_CODE_ACCESS

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, DATA_TYPE val,
                                             int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
            uint8_t val8 = val >> (i * 8);
#endif
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
}

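/*
 * Worked example (added for illustration; not in the original file):
 * assuming DATA_SIZE == 4, a little-endian target and a page-crossing store
 * of val == 0x11223344, the byte loop above issues four single-byte stores,
 * highest offset first:
 *
 *     addr + 3 <- 0x11
 *     addr + 2 <- 0x22
 *     addr + 1 <- 0x33
 *     addr + 0 <- 0x44
 *
 * The byte at addr + 3 lies on the second page, so that page is touched (and
 * filled into the TLB if necessary) before any byte of the first page is
 * written.
 */
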
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
                                                 GETRA_EXT());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
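
/*
 * Usage sketch (added for illustration; not in the original file): per the
 * header comment above, this template is included from the target op helpers
 * and exec.c, once per access size, roughly along the lines of
 *
 *     #define SHIFT 2
 *     #include "exec/softmmu_template.h"
 *
 * which generates the 32-bit ("l") flavour of the helpers; SHIFT 0, 1 and 3
 * produce the byte (b), word (w) and quad (q) variants in the same way.
 */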