/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
29e922b6
BS
24#include "qemu-timer.h"
25
b92e5a22
FB
/* This header is included multiple times, once per access width.  The
 * includer defines SHIFT (log2 of the access size in bytes); everything
 * below is derived from it. */
#define DATA_SIZE (1 << SHIFT)

/* SUFFIX   - name suffix for the generated helpers (b/w/l/q)
 * USUFFIX  - suffix of the unsigned raw ld helper (ldub/lduw/ldl/ldq)
 * DATA_TYPE- C type matching the access width */
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
47
/* When SOFTMMU_CODE_ACCESS is defined we are generating the code-fetch
 * variant of the load helpers: access type 2 and the addr_code TLB field
 * are used instead of a data read (type 0, addr_read).  The access type
 * value is passed through to tlb_fill()/do_unaligned_access(). */
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
55
/* Forward declaration: the fast-path loader below tail-calls this slow
 * path for unaligned accesses (defined further down in this file). */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
/* Perform a DATA_SIZE-wide MMIO read.
 *
 * physaddr on entry encodes the io_mem table index in its low bits (it is
 * the value stored in the iotlb); it is rewritten into the real physical
 * address by combining its page-aligned part with the virtual offset.
 * retaddr is the host return address of the generated code, recorded so
 * that cpu_io_recompile()/exception paths can locate the guest insn.
 *
 * For SHIFT > 2 (64-bit access) the read is split into two 32-bit MMIO
 * reads, ordered according to TARGET_WORDS_BIGENDIAN.
 */
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    /* Real MMIO (anything past the NOTDIRTY pseudo-region) must not be
     * replayed: if we cannot do I/O at this point in the TB, recompile a
     * one-insn TB so the access happens at a safe boundary. */
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
87
/* handle all cases except unaligned access which span two pages */
/* Fast-path load helper called directly from TCG-generated code.
 *
 * Looks up the softmmu TLB entry for addr in mmu_idx's table and either:
 *  - performs an MMIO read (non page-aligned tlb_addr bits set),
 *  - defers to slow_ld for an access spanning two pages,
 *  - or does a direct host-memory load via the TLB addend.
 * On a TLB miss, tlb_fill() installs the entry (or raises the guest
 * fault) and the lookup is retried via the redo label.
 */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could done more in memory macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    /* Comparing with TLB_INVALID_MASK folded in makes an invalid entry
     * always mismatch, forcing the tlb_fill path. */
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            /* GETPC() must be evaluated here, directly in the helper
             * called from generated code. */
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            /* Strict-alignment targets fault on any misaligned access,
             * even one that stays within a single page. */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
144
/* handle all unaligned cases */
/* Slow-path load helper.
 *
 * Handles the cases the fast path punted on, most importantly an access
 * that spans two pages: it is split into two aligned DATA_SIZE loads
 * (recursing into this same helper so each half gets its own TLB/MMIO
 * handling) and the result is assembled by shifting according to the
 * target endianness.  retaddr is the caller's GETPC(), propagated so
 * faults report the correct guest instruction.
 */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
            /* Combine the two aligned halves; the truncating cast below
             * discards bits shifted in from the unused part of res2/res1. */
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
194
/* Store helpers are only generated for data access; a code-fetch
 * instantiation (SOFTMMU_CODE_ACCESS) produces loads only.  The matching
 * #endif is at the bottom of this file. */
#ifndef SOFTMMU_CODE_ACCESS

/* Forward declaration: the fast-path store below tail-calls this slow
 * path for unaligned accesses. */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);
/* Perform a DATA_SIZE-wide MMIO write (mirror of io_read above).
 *
 * physaddr encodes the io_mem table index in its low bits and is
 * rewritten into the real physical address.  retaddr is recorded in
 * env->mem_io_pc so the guest insn can be located on recompile/fault.
 * 64-bit accesses are split into two 32-bit writes, ordered by target
 * endianness.
 */
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    /* Real MMIO must not be replayed: recompile a one-insn TB if we are
     * not at a point where I/O is permitted. */
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
/* Fast-path store helper called directly from TCG-generated code
 * (mirror of the __ld helper above).
 *
 * Access type 1 (write) is passed to do_unaligned_access()/tlb_fill().
 * On a TLB miss the entry is filled and the lookup retried via redo.
 */
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            /* GETPC() must be evaluated here, directly in the helper
             * called from generated code. */
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            /* Strict-alignment targets fault even within a single page. */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
281
/* handles all unaligned cases */
/* Slow-path store helper.
 *
 * A store spanning two pages is decomposed into DATA_SIZE byte stores
 * through the byte slow path (slow_stb), iterating from the highest
 * byte downwards so the second page is touched (and faults, if it must)
 * before the first page is partially modified.
 */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                /* Big-endian: byte i of memory holds the (DATA_SIZE-1-i)th
                 * least significant byte of val. */
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
328
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

/* Undefine all per-width parameters so this template can be re-included
 * with a different SHIFT. */
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ