/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
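
/*
 * As a concrete example of the name pasting, assume SHIFT == 2 and
 * MMUSUFFIX == _mmu (MMUSUFFIX must be provided by the including file;
 * _mmu is only an assumed value here).  The glue() expansions below
 * then generate, e.g.:
 *
 *     glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)  =>  helper_ret_ldl_mmu
 *
 * i.e. a 32-bit ("l") load helper taking an explicit return address:
 *
 *     uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t retaddr);
 */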

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

/* handle all cases except unaligned accesses which span two pages */
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
DATA_TYPE
glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, int mmu_idx,
                                             uintptr_t retaddr)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    hwaddr ioaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in a memory macro, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or is an
               IO access) */
        do_unaligned_access:
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx,
                                    retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
                                                        GETPC_EXT());
}
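
/*
 * A minimal usage sketch, under the same SHIFT == 2 / MMUSUFFIX == _mmu
 * assumption as above: a caller that wants a fault unwound to its own
 * call site can invoke the _ret_ variant directly with its return
 * address, rather than relying on GETPC_EXT() as the wrapper above
 * does.  "my_target_helper" is a hypothetical name, used only for
 * illustration:
 *
 *     void my_target_helper(CPUArchState *env, target_ulong addr,
 *                           int mmu_idx)
 *     {
 *         uint32_t val = helper_ret_ldl_mmu(env, addr, mmu_idx, GETPC());
 *         ... use val ...
 *     }
 */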

/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    hwaddr ioaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
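
/*
 * A worked example of the two-page combine above, assuming a
 * little-endian target, DATA_SIZE == 4, and addr == 0x1001 (offset 1
 * into the aligned pair):
 *
 *     addr1 = 0x1000 holds bytes B0 B1 B2 B3, so res1 = 0xB3B2B1B0
 *     addr2 = 0x1004 holds bytes B4 B5 B6 B7, so res2 = 0xB7B6B5B4
 *     shift = 1 * 8 = 8
 *     res   = (res1 >> 8) | (res2 << 24) = 0xB4B3B2B1
 *
 * which is exactly the little-endian value of the four bytes at
 * addresses 0x1001..0x1004.
 */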

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, DATA_TYPE val,
                                             int mmu_idx, uintptr_t retaddr)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
                                                 GETPC_EXT());
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
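
/*
 * A worked example of the byte-by-byte fallback above, assuming a
 * little-endian target, DATA_SIZE == 4, and val == 0xB4B3B2B1.  Each
 * loop iteration issues one 1-byte store through slow_stb (which
 * truncates its value argument to uint8_t):
 *
 *     i = 3:  addr + 3  <-  val >> 24  =  0xB4
 *     i = 2:  addr + 2  <-  val >> 16  =  0xB3
 *     i = 1:  addr + 1  <-  val >> 8   =  0xB2
 *     i = 0:  addr + 0  <-  val >> 0   =  0xB1
 */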

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
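
/*
 * A minimal sketch of how this template is meant to be instantiated,
 * modeled on the softmmu build (the exact including file may vary):
 * the includer defines MMUSUFFIX and SHIFT, then pulls the header in
 * once per access size, generating the b/w/l/q helper sets.  Note that
 * SHIFT is #undef'd at the end of the template above, so each
 * re-inclusion must define it anew.
 *
 *     #define MMUSUFFIX _mmu
 *
 *     #define SHIFT 0
 *     #include "exec/softmmu_template.h"
 *
 *     #define SHIFT 1
 *     #include "exec/softmmu_template.h"
 *
 *     #define SHIFT 2
 *     #include "exec/softmmu_template.h"
 *
 *     #define SHIFT 3
 *     #include "exec/softmmu_template.h"
 */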