/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"

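/* The including file defines SHIFT before including this template:
   SHIFT is log2 of the access size, so DATA_SIZE below is 1, 2, 4 or
   8 bytes. */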
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
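/* SUFFIX names the generated helper (b/w/l/q); USUFFIX selects the
   raw load primitive, which is the zero-extending variant (ldub/lduw)
   for the sub-word sizes. */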

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
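/* READ_ACCESS_TYPE is the access type reported to tlb_fill() and
   do_unaligned_access(): 0 for data loads, 2 for code fetches; the
   store helpers below pass 1. */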

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
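    /* the low bits of the iotlb value select the I/O handler slot;
       its page-aligned part plus the virtual address gives the
       physical address handed to the handler */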
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
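    /* io_mem_read provides handlers only up to 32 bits, so 64-bit
       accesses are split into two 32-bit reads combined in guest byte
       order */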
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more could be done in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
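    /* the entry matches when the virtual page numbers agree;
       TLB_INVALID_MASK is folded into the compare so an invalid entry
       never matches, and any other low flag bit (e.g. an I/O page)
       forces the slow path below */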
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
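            /* GETPC() is the host return address into the generated
               code; it lets cpu_restore_state() locate the faulting
               TB if the access has to be restarted */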
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
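            /* addend is the host-address offset for this page: adding
               it to the guest virtual address yields a host pointer
               into the RAM block backing the page */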
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
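        /* tlb_fill() either installs a valid entry, in which case the
           retry via 'redo' hits, or raises the guest fault and does
           not return here */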
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
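            /* combine the two naturally aligned reads: shift is the
               access's byte offset within the first word, in bits; it
               is never zero here, so the complementary shift stays
               below the type width */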
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
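    /* as for reads, 64-bit accesses are split into two 32-bit I/O
       writes issued in guest byte order */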
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

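/* handle all cases except unaligned accesses which span two pages */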
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
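                /* extract byte i of val in guest byte order and store
                   it through the byte helper, which redoes the TLB
                   lookup for each byte */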
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ