/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
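
/* This header is included several times, with DATA_SIZE (1, 2, 4 or 8) and
   MMUSUFFIX defined by the including file; each inclusion instantiates one
   family of load/store helpers for that access size. */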


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host. This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
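
/* For example, on a 64-bit host a 2-byte load helper is declared to return
   tcg_target_ulong (a 64-bit value) rather than uint16_t, so TCG-generated
   code can consume the full host register without worrying about how the
   ABI promotes narrow return values. */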

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif
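
/* TGT_LE/TGT_BE convert between the value held in host registers by the
   code below and the requested little/big-endian guest order: whichever
   macro matches TARGET_WORDS_BIGENDIAN is the identity, the other byte
   swaps DATA_SIZE bytes via BSWAP. */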

#if DATA_SIZE == 1
# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name helper_le_st_name
#else
# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
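
/* For single-byte accesses there is no endianness to distinguish, so the
   "le" and "be" names above alias a single helper_ret_* entry point. */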

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name helper_be_ld_name
# define helper_te_st_name helper_be_st_name
#else
# define helper_te_ld_name helper_le_ld_name
# define helper_te_st_name helper_le_st_name
#endif
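
/* The "te" (target-endian) aliases simply select whichever of the le/be
   helpers matches the guest byte order. */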

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
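    /* If this access is not currently allowed to touch device memory
       (cpu->can_do_io clear, e.g. when running with icount in the middle
       of a TB), cpu_io_recompile() retranslates the block so that the
       access can be replayed in a context where it is allowed; it does
       not return here. */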
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, DATA_SIZE,
                                iotlbentry->attrs);
    return val;
}
#endif
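
/* The load helpers below share one structure: check the requested alignment,
   look up the softmmu TLB entry (probing the victim TLB and calling
   tlb_fill() on a miss), then either dispatch an MMIO access through the
   io_read helper above, split an access that straddles a page boundary into
   two aligned loads whose results are recombined, or perform a direct host
   load through the cached TLB addend on the fast path. */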

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine. */
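        /* E.g. for a 4-byte load with addr % 4 == 2, shift is 16: the top
           two bytes of res1 form the low half of the result and the low
           two bytes of res2 form the high half. */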
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine. */
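        /* E.g. for a 4-byte load with addr % 4 == 2, shift is 16: the low
           two bytes of res1 form the high half of the result and the top
           two bytes of res2 form the low half. */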
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well. We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
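
/* Note that the signed helpers reuse the unsigned load and merely cast the
   result through SDATA_TYPE, which sign-extends the DATA_SIZE-sized value
   to the wider returned register type. */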

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, DATA_SIZE,
                                 iotlbentry->attrs);
}
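
/* The store helpers mirror the loads: alignment check, TLB lookup and
   refill, MMIO dispatch through the io_write helper above, a byte-by-byte
   slow path for stores that cross a page boundary, and a direct host store
   through the cached TLB addend on the fast path. */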

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always access data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB. Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first. */
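        /* Filling the second page before the byte loop below means that a
           fault on that page is reported before any bytes of the first
           page have been modified. */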
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple. */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit. */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract. */
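            /* Byte i of the value (bits i * 8 and up) is stored at addr + i,
               so the least significant byte goes to the lowest address. */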
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always access data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB. Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first. */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code. */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract. */
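            /* Byte i is taken from bit position (DATA_SIZE - 1 - i) * 8,
               so the most significant byte goes to the lowest address. */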
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

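/* Undefine the per-size macros (including DATA_SIZE itself) so that this
   header can be included again for a different access size. */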
#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name