/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
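
/*
 * This file is a multiple-inclusion template: the including file is
 * expected to define SHIFT (giving the access size as
 * DATA_SIZE = 1 << SHIFT bytes) and MMUSUFFIX before each inclusion.
 * As a rough sketch of how a target helper file typically uses it
 * (the exact names and values below are illustrative, not taken from
 * this header):
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 0
 *     #include "exec/softmmu_template.h"
 *     #define SHIFT 1
 *     #include "exec/softmmu_template.h"
 *     #define SHIFT 2
 *     #include "exec/softmmu_template.h"
 *     #define SHIFT 3
 *     #include "exec/softmmu_template.h"
 *
 * Each inclusion then generates the load and store helpers for the
 * corresponding access size (b/w/l/q).
 */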

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

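/*
 * READ_ACCESS_TYPE above is the access type passed to tlb_fill() and
 * do_unaligned_access(): 0 for a data load, 2 for a code fetch (the
 * store helpers below pass 1).
 *
 * io_read handles a load whose TLB entry points at I/O rather than RAM:
 * it recovers the MemoryRegion from the IOTLB value, records the virtual
 * address and return address for the MMIO path, and issues a
 * DATA_SIZE-wide read through the memory API.
 */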
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

/* Handle all cases except unaligned accesses which span two pages. */
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
DATA_TYPE
glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, int mmu_idx,
                                             uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2, res;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                            mmu_idx, retaddr);
        res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                            mmu_idx, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;
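        /* Combine the two halves; target endianness decides which half
           supplies the most-significant bits. */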
#ifdef TARGET_WORDS_BIGENDIAN
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        return res;
    }

    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
}

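/* Legacy load entry point without an explicit return address: it derives
   the host return address itself via GETPC_EXT() and forwards to the
   _ret_ variant above. */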
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
                                                        GETPC_EXT());
}

#ifndef SOFTMMU_CODE_ACCESS

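/* Store counterpart of io_read above: dispatch a store that hit an I/O
   TLB entry to the memory API, recording the faulting virtual address
   and return address for the MMIO path. */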
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

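/* Store helper: same structure as the load helper above (TLB refill on a
   miss, I/O dispatch, byte-by-byte split for an access spanning two
   pages, direct host store on the fast path). */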
void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, DATA_TYPE val,
                                             int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
            uint8_t val8 = val >> (i * 8);
#endif
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
}

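/* Legacy store entry point: derives the return address via GETPC_EXT()
   and forwards to the _ret_ variant above. */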
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
                                                 GETPC_EXT());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

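/* Undefine the per-size macros (including the caller-provided SHIFT) so
   that the file can be included again with a different SHIFT. */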
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ