/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
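
/* This header is a template: it is included once per access width, with
   SHIFT set to 0, 1, 2 or 3, and glue() pastes SUFFIX and MMUSUFFIX onto
   each function name.  With SHIFT == 2 (SUFFIX 'l') and MMUSUFFIX defined
   as _mmu, for instance, the fast-path loader below is emitted as
   __ldl_mmu(). */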

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#else
#define READ_ACCESS_TYPE 0
#endif
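
/* Access-type convention passed to tlb_fill(): 0 for a data load, 1 for a
   data store, 2 for an instruction fetch (SOFTMMU_CODE_ACCESS builds). */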

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                        int is_user,
                                                        void *retaddr);

static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                              unsigned long tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
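
/* The io_mem_read/io_mem_write dispatch tables only carry 8/16/32-bit
   handlers, so a 64-bit access (SHIFT == 3) is split above into two
   32-bit device accesses, ordered to match the target endianness. */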

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                         int is_user)
{
    DATA_TYPE res;
    int index;
    unsigned long physaddr, tlb_addr;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: this could be done faster with a memory macro, in a
       non-portable way */
 redo:
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         is_user, retaddr);
        } else {
            /* unaligned access within the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
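
/* The masked compare in the fast path also rejects invalid entries:
   an unused slot keeps TLB_INVALID_MASK set in its stored address, and
   that bit is never set in (addr & TARGET_PAGE_MASK), so the match
   fails until the entry is filled.  GETPC() captures the host return
   address so tlb_fill() can attribute a fault back to the translated
   code that issued the access. */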

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                        int is_user,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    unsigned long physaddr, tlb_addr, addr1, addr2;

 redo:
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          is_user, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          is_user, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access within the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
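
/* Worked example of the recombination above: with DATA_SIZE == 4, a
   little-endian target and addr == addr1 + 1, shift is 8, so
   res = (res1 >> 8) | (res2 << 24) keeps the top three bytes of the
   word at addr1 and the low byte of the word at addr2, i.e. exactly
   the four bytes starting at addr. */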

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                          DATA_TYPE val,
                                          unsigned long tlb_addr,
                                          void *retaddr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    env->mem_write_vaddr = tlb_addr;
    env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
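
/* mem_write_vaddr and mem_write_pc are stashed in the CPU state before
   the device callback runs; write handlers that need to unwind the
   access (for instance when a store hits a page holding translated
   code) use them to recover the guest virtual address and the host pc. */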

/* handle all store cases except unaligned accesses which span two pages */
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                    DATA_TYPE val,
                                                    int is_user)
{
    unsigned long physaddr, tlb_addr;
    void *retaddr;
    int index;

 redo:
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_write[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   is_user, retaddr);
        } else {
            /* aligned/unaligned access within the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr)
{
    unsigned long physaddr, tlb_addr;
    int index, i;

 redo:
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_write[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for (i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          is_user, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          is_user, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access within the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
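
/* In the byte loop above the shift selects bytes in storage order: on a
   big-endian target byte i is val >> ((DATA_SIZE - 1 - i) * 8), putting
   the most significant byte at the lowest address, while a little-endian
   target uses val >> (i * 8).  Each byte goes back through slow_stb so
   that a byte landing on an I/O page or an unfilled TLB entry is still
   handled correctly. */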

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE