/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
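/* Usage sketch (an assumption about how this template is instantiated, not
 * part of the template itself): the including source file defines SHIFT
 * before each inclusion, and the template then expands to the load/store
 * helpers for that access width, e.g.
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"   // emits the 32-bit (uint32_t) helpers
 *
 * Re-including with SHIFT = 0, 1, 2, 3 generates the byte, word, long and
 * quad variants. */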
static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr);
static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
                                  void *retaddr);
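/* io_read/io_write below handle accesses that hit an I/O (MMIO) page.  The
 * low bits of tlb_addr select an entry in the io_mem_read/io_mem_write
 * dispatch tables, and the callback for the access width (SHIFT) is invoked
 * with the physical address.  For 64-bit accesses (SHIFT == 3) only 32-bit
 * callbacks are available, so the access is split into two 4-byte operations
 * whose order depends on TARGET_WORDS_BIGENDIAN. */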
static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                              unsigned long tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](physaddr) << 32;
    res |= io_mem_read[index][2](physaddr + 4);
#else
    res = io_mem_read[index][2](physaddr);
    res |= (uint64_t)io_mem_read[index][2](physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                          DATA_TYPE val,
                                          unsigned long tlb_addr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    io_mem_write[index][SHIFT](physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](physaddr, val >> 32);
    io_mem_write[index][2](physaddr + 4, val);
#else
    io_mem_write[index][2](physaddr, val);
    io_mem_write[index][2](physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
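/* The load helper below performs a software TLB lookup: the virtual page
 * number indexes the TLB, the tag is compared (TLB_INVALID_MASK marks empty
 * entries), and on a hit the per-entry addend converts the guest address
 * into a host pointer.  Non-zero low bits in tlb_addr flag an I/O page.
 * The page-span test works on the page offset: for a 4-byte access at
 * offset 0xffd, 0xffd + 3 = 0x1000 >= TARGET_PAGE_SIZE (4 KiB here, given
 * the hard-coded 0xfff offset mask), so the access crosses into the next
 * page and must take the slow path. */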
/* handle all cases except unaligned access which span two pages */
DATA_TYPE __attribute((regparm(1))) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
{
    DATA_TYPE res;
    int is_user, index;
    unsigned long physaddr, tlb_addr;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could be done more in the memory macro in a non portable way */
    is_user = (env->cpl == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = __builtin_return_address(0);
            res = glue(slow_ld, SUFFIX)(addr, retaddr);
        } else {
            /* unaligned access in the same page */
            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = __builtin_return_address(0);
        tlb_fill(addr, 0, retaddr);
        goto redo;
    }
    return res;
}
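/* slow_ld handles the unaligned cases.  When an access spans two pages it
 * is split into two naturally aligned accesses at addr1 and
 * addr2 = addr1 + DATA_SIZE, each resolved recursively (so each half can
 * itself hit RAM, MMIO or trigger a TLB fill), and the two results are
 * merged with shifts whose direction depends on TARGET_WORDS_BIGENDIAN. */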
/* handle all unaligned cases */
static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int is_user, index, shift;
    unsigned long physaddr, tlb_addr, addr1, addr2;

    is_user = (env->cpl == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(slow_ld, SUFFIX)(addr1, retaddr);
            res2 = glue(slow_ld, SUFFIX)(addr2, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
        } else {
            /* unaligned/aligned access in the same page */
            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 0, retaddr);
        goto redo;
    }
    return res;
}
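/* The store helper below is the counterpart of the load helper above: same
 * TLB lookup, but against the tlb_write table, and tlb_fill() is called with
 * 1 rather than 0 as its second argument (apparently distinguishing write
 * faults from read faults). */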
void __attribute((regparm(2))) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val)
{
    int is_user, index;
    unsigned long physaddr, tlb_addr;
    void *retaddr;

    is_user = (env->cpl == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = __builtin_return_address(0);
            glue(slow_st, SUFFIX)(addr, val, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = __builtin_return_address(0);
        tlb_fill(addr, 1, retaddr);
        goto redo;
    }
}
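/* slow_st handles unaligned stores.  A store that spans two pages is simply
 * decomposed into DATA_SIZE single-byte stores through slow_stb(), shifting
 * val so the bytes land in guest memory order for either endianness. */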
/* handles all unaligned cases */
static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
                                  void *retaddr)
{
    unsigned long physaddr, tlb_addr;
    int is_user, index, i;

    is_user = (env->cpl == 3);
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for(i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                slow_stb(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), retaddr);
#else
                slow_stb(addr + i, val >> (i * 8), retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, retaddr);
        goto redo;
    }
}
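/* Hypothetical direct use from C code (a sketch assuming the SHIFT == 2
 * instantiation produces helpers named __ldl_mmu/__stl_mmu and that env and
 * its TLB tables are already set up):
 *
 *     uint32_t v = __ldl_mmu(guest_vaddr);   // load through the soft TLB
 *     __stl_mmu(guest_vaddr, v + 1);         // store back, may refill the TLB
 */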