/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"

#include "helper_regs.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_OP

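/* Return true when a guest data access must be byte-swapped by hand, i.e.
 * when the current MSR[LE] setting differs from the endianness this
 * target binary was built for. */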
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

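/* Advance an effective address; in 32-bit mode the result wraps to 32 bits. */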
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

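/* lmw: Load Multiple Word. Load a word from memory into each GPR from
 * 'reg' up to r31, byte-swapping when guest and target endianness differ. */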
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data(env, addr));
        } else {
            env->gpr[reg] = cpu_ldl_data(env, addr);
        }
        addr = addr_add(env, addr, 4);
    }
}

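/* stmw: Store Multiple Word. Store GPRs 'reg' through r31 to successive
 * words in memory. */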
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data(env, addr, bswap32((uint32_t)env->gpr[reg]));
        } else {
            cpu_stl_data(env, addr, (uint32_t)env->gpr[reg]);
        }
        addr = addr_add(env, addr, 4);
    }
}

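/* Back end for lswi/lswx: load 'nb' bytes into successive GPRs starting at
 * 'reg' (wrapping from r31 to r0), four bytes per register; a trailing
 * partial word is left-justified and zero-padded. */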
void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = cpu_ldl_data(env, addr);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= cpu_ldub_data(env, addr) << sh;
            addr = addr_add(env, addr, 1);
        }
    }
}

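/* lswx: Load String Word Indexed. The byte count comes from XER (xer_bc);
 * a count of zero is a no-op. */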
/* The PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = (xer_bc + 3) / 4;
        if (unlikely((ra != 0 && reg < ra && (reg + num_used_regs) > ra) ||
                     (reg < rb && (reg + num_used_regs) > rb))) {
            helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(env, addr, xer_bc, reg);
        }
    }
}

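/* Back end for stswi/stswx: store 'nb' bytes from successive GPRs starting
 * at 'reg', taking a trailing partial word from the most-significant bytes
 * of the final register. */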
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        cpu_stl_data(env, addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(env, addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            cpu_stb_data(env, addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(env, addr, 1);
        }
    }
}

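/* Zero one data cache block of the given size and drop any pending
 * lwarx/ldarx reservation on it. */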
static void do_dcbz(CPUPPCState *env, target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        cpu_stl_data(env, addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

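/* dcbz/dcbzl: Data Cache Block Zero. On the 970, a plain dcbz may only
 * clear 32 bytes depending on HID5, while dcbzl always clears a full
 * cache line. */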
void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t is_dcbzl)
{
    int dcbz_size = env->dcache_line_size;

#if defined(TARGET_PPC64)
    if (!is_dcbzl &&
        (env->excp_model == POWERPC_EXCP_970) &&
        ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* XXX add e500mc support */

    do_dcbz(env, addr, dcbz_size);
}

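/* icbi: Instruction Cache Block Invalidate for the block containing 'addr'. */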
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * the PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To make sure it is, do the load "by hand".
     */
    cpu_ldl_data(env, addr);
}

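/* lscbx: Load String and Compare Byte Indexed (POWER/601). Load up to
 * xer_bc bytes into successive GPRs, stopping when a byte equal to xer_cmp
 * is seen; the return value is the index at which the scan stopped. */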
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data(env, addr);
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

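/* HI_IDX/LO_IDX account for the host's layout of ppc_avr_t so that vector
 * element indexing below matches guest element order. The lve*x/stve*x
 * helpers transfer a single vector element, selected by the low bits of
 * the effective address within the 16-byte quadword. */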
/* We use msr_le to determine index ordering in a vector. However,
   byteswapping is not simply controlled by msr_le. We also need to take
   into account endianness of the target. This is done for the little-endian
   PPC64 user-mode target. */

#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr));                        \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr);                              \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data, I, u8)
LVE(lvehx, cpu_lduw_data, bswap16, u16)
LVE(lvewx, cpu_ldl_data, bswap32, u32)
#undef I
#undef LVE

#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]));      \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)]);            \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data, I, u8)
STVE(stvehx, cpu_stw_data, bswap16, u16)
STVE(stvewx, cpu_stl_data, bswap32, u32)
#undef I
#undef STVE

#undef HI_IDX
#undef LO_IDX

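/* tbegin. from the ISA 2.07 transactional-memory facility. */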
void helper_tbegin(CPUPPCState *env)
{
    /* As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the
     * next instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}