/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif

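/*
 * A minimal sketch of how this template is meant to be instantiated
 * (the real instantiations live in cputlb.c): the including file
 * defines MMUSUFFIX and SHIFT, then pulls this header in once per
 * access size.
 *
 *     #define MMUSUFFIX _mmu
 *
 *     #define SHIFT 0
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 1
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *
 *     #define SHIFT 3
 *     #include "softmmu_template.h"
 *
 * SHIFT is consumed (and #undef'd at the bottom of this file) on each
 * inclusion.
 */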
/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

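/*
 * For illustration only (assuming MMUSUFFIX is _mmu): with
 * DATA_SIZE == 4 the macros above expand to helper_le_ldul_mmu,
 * helper_be_ldul_mmu, helper_le_ldsl_mmu, helper_be_ldsl_mmu,
 * helper_le_stl_mmu and helper_be_stl_mmu.  With DATA_SIZE == 1 the
 * LE and BE names collapse to the single helper_ret_ldub_mmu and
 * helper_ret_stb_mmu pair, since a byte access has no endianness.
 */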
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

#ifndef SOFTMMU_CODE_ACCESS
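/* Complete a load from an IO-mapped page: reconstruct the offset into
   the MemoryRegion from the iotlb entry, check that this context is
   currently allowed to perform IO (otherwise retranslate), and
   dispatch the read to the region covering the page.  */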
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif

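/* Little-endian load helper, called directly from TCG-generated code.
   The fast path at the bottom reads host memory through the TLB entry;
   TLB misses, IO pages and page-spanning accesses take the slow paths
   above it.  */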
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }
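    /* A worked example of the little-endian combine above (illustrative
       values only): for a 4-byte load with addr & 3 == 2, shift is 16,
       so the two bytes at addr come from the top half of res1 and the
       two bytes at addr + 2 from the bottom half of res2:
           res = (res1 >> 16) | (res2 << 16);
     */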

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
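/* Big-endian load helper; identical to the little-endian one above
   except for the byte order used in the IO and unaligned slow paths.  */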
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }
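    /* A worked example of the big-endian combine above (illustrative
       values only): for a 4-byte load with addr & 3 == 2, shift is 16,
       so the two bytes at addr come from the bottom half of res1 and
       the two bytes at addr + 2 from the top half of res2:
           res = (res1 << 16) | (res2 >> 16);
     */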

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on a 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

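/* Store counterpart of io_read above: dispatch a write on an
   IO-mapped page to the MemoryRegion covering it.  */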
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

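/* Little-endian store helper, called directly from TCG-generated code.
   The fast path at the bottom writes host memory through the TLB entry;
   TLB misses, IO pages and page-spanning accesses take the slow paths
   above it.  */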
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }
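    /* A worked example of the byte loop above (illustrative values
       only): a 4-byte store of 0x11223344 that crosses a page boundary
       is split into byte stores issued from the highest address down:
       0x11 at addr + 3, 0x22 at addr + 2, 0x33 at addr + 1 and finally
       0x44 at addr, giving the little-endian layout 44 33 22 11.  */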

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
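/* Big-endian store helper; identical to the little-endian one above
   except for the byte order used in the IO and unaligned slow paths.  */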
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    int a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    if (a_bits > 0 && (addr & ((1 << a_bits) - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }
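    /* A worked example of the byte loop above (illustrative values
       only): a 4-byte store of 0x11223344 that crosses a page boundary
       is split into byte stores issued from the highest address down:
       0x44 at addr + 3, 0x33 at addr + 2, 0x22 at addr + 1 and finally
       0x11 at addr, giving the big-endian layout 11 22 33 44.  */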

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
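/* A sketch of the intended use, not taken from this file: a target
   helper that must not fault halfway through a multi-step update can
   call
       probe_write(env, addr, mmu_idx, GETPC());
   up front, so that any write fault is raised before guest state is
   modified.  */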
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name