qemu.git: include/exec/softmmu_template.h
/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#else
#error unsupported data size
#endif

#define DATA_TYPE   glue(u, SDATA_TYPE)
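
/*
 * Illustrative sketch (not part of the original file): this header is a
 * template, instantiated once per access size.  A typical user defines
 * SHIFT and MMUSUFFIX before including it, roughly like this:
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "exec/softmmu_template.h"   // 1-byte helpers, e.g. helper_ret_ldub_mmu
 *   #define SHIFT 1
 *   #include "exec/softmmu_template.h"   // 2-byte helpers
 *   #define SHIFT 2
 *   #include "exec/softmmu_template.h"   // 4-byte helpers
 *   #define SHIFT 3
 *   #include "exec/softmmu_template.h"   // 8-byte helpers
 *
 * The exact include site and macro spellings vary by QEMU version; this is
 * only meant to show how SHIFT drives the #if ladder above.
 */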

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
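
/*
 * Worked instantiation (illustrative only, assuming MMUSUFFIX is _mmu):
 * with SHIFT == 1 on a 64-bit host, DATA_SIZE is 2, so WORD_TYPE resolves
 * to tcg_target_ulong (a 64-bit type), USUFFIX to "uw" and SSUFFIX to "sw".
 * The generated loader is therefore
 *
 *   tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env,
 *                                        target_ulong addr,
 *                                        int mmu_idx, uintptr_t retaddr);
 *
 * i.e. a 16-bit value zero-extended to the host register width, which is
 * what TCG-generated code expects to find in the return register.
 */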

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}
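
/*
 * Illustrative note (not in the original file): the iotlb entry passed in
 * as "physaddr" is a packed value.  Its sub-page bits identify the target
 * MemoryRegion via iotlb_to_region(), while the page-aligned part, added
 * to the guest virtual address as above, yields the offset handed to
 * io_mem_read()/io_mem_write():
 *
 *   access offset = (iotlb & TARGET_PAGE_MASK) + addr
 *
 * This is a sketch of the encoding as consumed by the two lines above,
 * not a stable ABI; the details live in cputlb.c and exec.c.
 */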

/* handle all cases except unaligned accesses that span two pages */
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
WORD_TYPE
glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, int mmu_idx,
                                              uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
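
    /*
     * Worked example (illustrative, assuming TARGET_PAGE_BITS == 12 and
     * CPU_TLB_SIZE == 256): for addr == 0x12345678,
     *
     *   index    = (0x12345678 >> 12) & 0xff          = 0x45
     *   page tag =  0x12345678 & TARGET_PAGE_MASK     = 0x12345000
     *
     * The comparison above also folds in TLB_INVALID_MASK, so an entry
     * whose invalid bit is set can never match and always forces a
     * tlb_fill().
     */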

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
    }

    /* Handle slow unaligned access (one that spans two pages or hits I/O).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2, res;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
            (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
            (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        return res;
    }
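
    /*
     * Worked example (illustrative): a little-endian 4-byte load at an
     * address ending in ...ffe, two bytes before a page boundary.
     * addr1 = addr & ~3 loads the aligned word containing the first two
     * wanted bytes, addr2 loads the first word of the next page, and
     * shift = (addr & 3) * 8 = 16, so
     *
     *   res = (res1 >> 16) | (res2 << 16)
     *
     * keeps the top two bytes of res1 as the low half of the result and
     * the bottom two bytes of res2 as the high half, i.e. exactly the
     * four bytes starting at addr.
     */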

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    /* Note that ldl_raw is defined with type "int".  */
    return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
}

DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
                                                         GETRA_EXT());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on a 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE
glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, int mmu_idx,
                                              uintptr_t retaddr)
{
    return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
        (env, addr, mmu_idx, retaddr);
}
#endif
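
/*
 * Illustrative note (not in the original file): the (SDATA_TYPE) cast plus
 * the implicit widening to WORD_TYPE is what performs the sign extension.
 * E.g. with SHIFT == 1 on a 64-bit host, a loaded value of 0x8000 comes
 * back from helper_ret_ldsw_mmu (assuming MMUSUFFIX is _mmu) as
 * 0xffffffffffff8000 in the host return register.
 */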

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                             target_ulong addr, DATA_TYPE val,
                                             int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (one that spans two pages or hits I/O).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
            uint8_t val8 = val >> (i * 8);
#endif
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }
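
    /*
     * Worked example (illustrative): storing the 4-byte little-endian
     * value 0xddccbbaa two bytes before a page boundary.  The loop above
     * issues four 1-byte stores, from the highest address down:
     *
     *   i = 3: byte 0xdd -> addr + 3   (second page)
     *   i = 2: byte 0xcc -> addr + 2   (second page)
     *   i = 1: byte 0xbb -> addr + 1   (first page)
     *   i = 0: byte 0xaa -> addr + 0   (first page)
     *
     * Each byte store recurses through helper_ret_stb, so each page gets
     * its own TLB lookup and, if needed, its own tlb_fill().
     */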

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
}

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
                                                 GETRA_EXT());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX