/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
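
/*
 * This header is a multiple-inclusion template: each #include with SHIFT
 * defined to 0..3 generates the byte/word/long/quad variants of the helpers
 * below.  A rough sketch of what an instantiation site looks like
 * (illustrative only; the real include sites and surrounding defines live
 * in cputlb.c):
 *
 *   #define MMUSUFFIX _mmu
 *
 *   #define SHIFT 0
 *   #include "exec/softmmu_template.h"
 *
 *   #define SHIFT 1
 *   #include "exec/softmmu_template.h"
 *
 *   #define SHIFT 2
 *   #include "exec/softmmu_template.h"
 *
 *   #define SHIFT 3
 *   #include "exec/softmmu_template.h"
 *
 * This file #undefs SHIFT and the macros derived from it at the end, so it
 * can be included repeatedly; defining SOFTMMU_CODE_ACCESS before inclusion
 * generates the code-fetch variants instead.
 */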
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
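/* As a concrete (illustrative) example: with SHIFT == 2 and MMUSUFFIX == _mmu,
   the names above expand to helper_le_ldul_mmu, helper_be_ldul_mmu,
   helper_le_ldsl_mmu, helper_be_ldsl_mmu, helper_le_stl_mmu and
   helper_be_stl_mmu.  */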

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

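/* Dispatch a load that hit an I/O (non-RAM) page through the memory API,
   recompiling the TB first if we are not at an I/O-safe point.  */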
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

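/* Little-endian load helper: return the DATA_SIZE-byte value at ADDR,
   calling tlb_fill() on a TLB miss.  This is the out-of-line slow path
   used by TCG-generated code.  */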
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

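/* Old-style (no explicit return address) load entry point: recover the
   return address with GETRA() and forward to the target-endian helper
   above.  */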
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return helper_te_ld_name(env, addr, mmu_idx, GETRA());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
# endif
#endif

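/* Dispatch a store that hit an I/O (non-RAM) page through the memory API,
   recompiling the TB first if we are not at an I/O-safe point.  */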
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

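/* Little-endian store helper: write the DATA_SIZE-byte value VAL at ADDR,
   calling tlb_fill() on a TLB miss.  Unaligned stores that cross a page
   boundary fall back to byte-by-byte stores.  */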
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always access data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always access data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

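/* Old-style (no explicit return address) store entry point: recover the
   return address with GETRA() and forward to the target-endian helper.  */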
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name