/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define DATA_SIZE (1 << SHIFT)
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
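
/*
 * Note: this header is a template.  It is included once per access size
 * (SHIFT = 0..3) and once per MMU access variant (MMUSUFFIX); glue() pastes
 * SUFFIX and MMUSUFFIX onto the function names, producing names like
 * __ldq_mmu (the exact names depend on the including file's MMUSUFFIX).
 * The SUFFIX/USUFFIX values above are reconstructed following the usual
 * ldX/stX naming convention; the code below only relies on them matching
 * the corresponding ld/st _raw helpers.
 */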

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
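
/*
 * Note: when the template is instantiated with SOFTMMU_CODE_ACCESS, the
 * generated loads are instruction fetches: they look up the addr_code field
 * of the TLB entry and report access type 2 to tlb_fill(); ordinary data
 * reads use addr_read and access type 0.
 */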

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int is_user,
                                                        void *retaddr);
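
/*
 * I/O read helper: for an I/O page, tlb_addr carries the io_mem table index
 * in its low bits, so the read callback registered for that memory region
 * can be looked up and invoked directly.  For the 64-bit case (SHIFT > 2)
 * the access is split into two 32-bit reads, because the io_mem_read table
 * only provides 1, 2 and 4 byte handlers.
 */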
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
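
/*
 * Note on the fast path below: the virtual address is hashed into the
 * per-mode TLB (CPU_TLB_SIZE entries).  On a hit, "addend" converts the
 * guest virtual address directly into a host pointer; any bit set in the
 * page-offset part of tlb_addr marks the page as needing special treatment
 * (I/O access), and a miss calls tlb_fill() to refill the entry before
 * retrying via the redo label.  GETPC() is assumed to return the caller's
 * host return address so that tlb_fill() can attribute a possible guest
 * fault to the right translated-code block.
 */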
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                         int is_user)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t physaddr;
    void *retaddr;

    /* test if there is a match for an unaligned or I/O access */
    /* XXX: could be done directly in the memory access macros, in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* I/O access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or is an I/O access) */
        do_unaligned_access:
            retaddr = GETPC();
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         is_user, retaddr);
        } else {
            /* unaligned access within the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int is_user,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t physaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* I/O access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          is_user, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          is_user, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access within the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
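
/*
 * Worked example of the two-page combine above (little-endian, DATA_SIZE = 4,
 * addr offset 1 within its aligned word): addr1 = addr & ~3 loads the aligned
 * word containing bytes 0..3, addr2 loads bytes 4..7, and shift = 8, so
 * res = (res1 >> 8) | (res2 << 24) picks bytes 1..3 from the first word and
 * byte 4 from the second.  The two recursive slow_ld calls are each aligned,
 * so they take the I/O or same-page path and never recurse further.
 */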

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong tlb_addr,
                                          void *retaddr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    env->mem_write_vaddr = tlb_addr;
    env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
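
/*
 * Note: mem_write_vaddr and mem_write_pc are recorded before dispatching the
 * write so that a device handler which needs the guest virtual address or
 * the translated-code return address (for instance to invalidate translated
 * code on a write to a code page) can find them in the CPU state.  As with
 * io_read, 64-bit accesses are split into two 32-bit device accesses.
 */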

void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                    DATA_TYPE val,
                                                    int is_user)
{
    target_phys_addr_t physaddr;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* I/O access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   is_user, retaddr);
        } else {
            /* aligned/unaligned access within the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
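
/*
 * Note: the store path passes access type 1 (data write) to tlb_fill().  A
 * tlb_addr with low bits set does not necessarily mean device memory: the
 * TLB code may also route writes to otherwise ordinary RAM through io_write,
 * e.g. to track dirty pages and invalidate translations, which is why the
 * aligned I/O case is taken whenever any flag bit is present.
 */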

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr)
{
    target_phys_addr_t physaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* I/O access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for (i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          is_user, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          is_user, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access within the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
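
/*
 * Note: the unaligned store falls back to storing one byte at a time through
 * the byte-sized slow_stb helper, so each byte goes through its own TLB
 * lookup and can land in a different page or in device memory.  The shift
 * selects the byte in guest memory order: for a little-endian target byte i
 * is val >> (i * 8); for a big-endian target it is the mirrored byte,
 * val >> ((DATA_SIZE - 1 - i) * 8).
 */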

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE