/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
#include "memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

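/*
 * This header is a multiple-inclusion template: the includer defines
 * SHIFT (log2 of the access size, 0..3) and MMUSUFFIX before each
 * inclusion, and the macros above select the per-size SUFFIX and
 * DATA_TYPE.  A sketch of the usual instantiation sequence (the exact
 * includer varies by target; _mmu is the conventional MMUSUFFIX):
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 *
 * With SHIFT == 2 and MMUSUFFIX == _mmu, for example, glue() below
 * produces helper_ldl_mmu() and helper_stl_mmu().
 */
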
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
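
/*
 * Access type convention, as passed to tlb_fill() and
 * do_unaligned_access(): 0 is a data load, 1 a data store and 2 a code
 * fetch.  When this file is built for code fetches
 * (SOFTMMU_CODE_ACCESS), reads go through the TLB's addr_code field
 * instead of addr_read.
 */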

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
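
/*
 * Load DATA_SIZE bytes from an I/O (non-RAM) page.  iotlb_to_region()
 * recovers the MemoryRegion from the iotlb entry, and combining the
 * page-aligned part of the entry with the virtual address yields the
 * offset passed to io_mem_read().  Accesses wider than 4 bytes are
 * split into two 4-byte reads combined according to the target's
 * endianness; for regions that may have read side effects, the
 * can_do_io() check makes cpu_io_recompile() retranslate so that the
 * access happens at a deterministic point when icount is in use.
 */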
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    hwaddr ioaddr;
    uintptr_t retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done more in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
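    /*
     * The softmmu TLB is a direct-mapped cache indexed by the low bits
     * of the virtual page number.  The tag comparison below folds in
     * TLB_INVALID_MASK, so an invalidated entry can never match.
     */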
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
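            /*
             * GETPC_EXT() recovers the host return address into the
             * generated code that called this helper; tlb_fill() and
             * cpu_io_recompile() use it to locate the faulting guest
             * instruction.
             */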
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    hwaddr ioaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
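            /*
             * Recombine the two aligned halves.  For example, with
             * DATA_SIZE == 4 and addr % 4 == 2 on a little-endian
             * target: shift == 16, so the result takes the top two
             * bytes of res1 (res1 >> 16) and the bottom two bytes of
             * res2 (res2 << 16).
             */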
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

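/*
 * Store DATA_SIZE bytes to an I/O (non-RAM) page.  This mirrors
 * io_read() above: accesses wider than 4 bytes are split into two
 * 4-byte writes ordered by the target's endianness, and for regions
 * with side effects cpu_io_recompile() forces the store to happen at
 * a deterministic point when icount is in use.
 */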
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

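/*
 * Store fast path.  Any flag bits left in the low bits of addr_write
 * (tlb_addr & ~TARGET_PAGE_MASK) steer the access onto the I/O path,
 * which is how MMIO pages and pages that need dirty tracking bypass
 * the direct host store.
 */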
void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, DATA_TYPE val,
                                              int mmu_idx)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
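            /*
             * Store byte by byte through the byte helper so that each
             * byte takes its own TLB/unaligned path.  On a little-endian
             * target, byte i of the value (val >> (i * 8)) goes to
             * addr + i; the big-endian variant reverses the selection.
             */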
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

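/*
 * Undefine the per-size template parameters so the header can be
 * included again with a different SHIFT.
 */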
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ