/*
 * PowerPC memory access emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

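/*
 * Note: the guest sees byteswapped data exactly when its current byte
 * order (MSR[LE]) disagrees with the default byte order of the target,
 * which is what the #if below encodes.
 */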
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}

/*****************************************************************************/
/* Memory load and stores */

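/*
 * Advance an effective address, wrapping the result to 32 bits when
 * the MSR says the CPU is not currently in 64-bit mode.
 */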
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}

static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

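    /*
     * -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to
     * the end of its page, i.e. TARGET_PAGE_SIZE - (addr & ~TARGET_PAGE_MASK).
     */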
    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page. */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages. */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize. */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}

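/*
 * lmw: load GPRs rT..r31 from (32 - rT) consecutive big-endian words
 * starting at addr; stmw below is the mirror-image store.
 */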
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}

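/*
 * Load string (lswi/lswx): bytes fill successive registers starting at
 * the most significant byte; a final partial word is left-justified and
 * the remaining low-order bytes of that register are zeroed.
 */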
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o. */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded. On the other hand, IBM says
 * this is valid, but rA won't be loaded. For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}

void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host. */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}

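/*
 * dcbz: zero an aligned, cache-line-sized block of storage. Zeroing
 * the block also kills any reservation that lies within it.
 */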
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false);

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * The PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}

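/*
 * lscbx: load string and compare byte indexed. Loads up to XER[BC]
 * bytes into successive registers, stopping early once a loaded byte
 * matches the XER compare byte; the count of iterations is returned
 * so the caller can update XER.
 */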
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

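/*
 * 16-byte load/store helpers for the parallel (MTTCG) case. These
 * require host 128-bit atomics; when unavailable, the translator
 * raises EXCP_ATOMIC and the operation is replayed with all other
 * CPUs stopped, so the asserts below cannot fire.
 */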
#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}

void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}

void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
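
/*
 * stqcx.: succeed only if the reservation from a matching lqarx is
 * intact. QEMU approximates the reservation with a 128-bit cmpxchg
 * against the value saved by lqarx (reserve_val holds the high
 * doubleword, reserve_val2 the low). CR0 is returned with EQ set on
 * success and SO copied from XER.
 */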
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                          opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}

uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator. */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                          opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif

/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector. However,
 * byteswapping is not simply controlled by msr_le. We also need to
 * take into account the endianness of the target. This matters for
 * the little-endian PPC64 user-mode target.
 */

#define LVE(name, access, swap, element)                            \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,              \
                       target_ulong addr)                           \
    {                                                               \
        size_t n_elems = ARRAY_SIZE(r->element);                    \
        int adjust = HI_IDX * (n_elems - 1);                        \
        int sh = sizeof(r->element[0]) >> 1;                        \
        int index = (addr & 0xf) >> sh;                             \
        if (msr_le) {                                               \
            index = n_elems - index - 1;                            \
        }                                                           \
                                                                    \
        if (needs_byteswap(env)) {                                  \
            r->element[LO_IDX ? index : (adjust - index)] =         \
                swap(access(env, addr, GETPC()));                   \
        } else {                                                    \
            r->element[LO_IDX ? index : (adjust - index)] =         \
                access(env, addr, GETPC());                         \
        }                                                           \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE

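/*
 * Worked example: lvewx with (addr & 0xf) == 4 has sh == 2, so
 * index == 1 (u32 element 1, counting from the most significant end);
 * with MSR[LE] set this is mirrored to element 2. The LO_IDX/adjust
 * expression then maps the architected index onto the host's element
 * ordering. The same indexing applies to the stores below.
 */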
#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

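/*
 * lxvl/stxvl: load/store VSX vector with length. The byte count is
 * taken from bits 0:7 of RB (the GET_NB macro), capped at 16; on load,
 * the unfilled bytes of the target vector are zeroed. The 'lj'
 * (left-justified) variants lxvll/stxvll ignore MSR[LE] when ordering
 * the bytes.
 */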
#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

#define VSX_LXVL(name, lj)                                            \
void helper_##name(CPUPPCState *env, target_ulong addr,               \
                   ppc_vsr_t *xt, target_ulong rb)                    \
{                                                                     \
    ppc_vsr_t t;                                                      \
    uint64_t nb = GET_NB(rb);                                         \
    int i;                                                            \
                                                                      \
    t.s128 = int128_zero();                                           \
    if (nb) {                                                         \
        nb = (nb >= 16) ? 16 : nb;                                    \
        if (msr_le && !lj) {                                          \
            for (i = 16; i > 16 - nb; i--) {                          \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        } else {                                                      \
            for (i = 0; i < nb; i++) {                                \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());     \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        }                                                             \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL

#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (msr_le && !lj) {                                          \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin. The reason
     * given is "Nesting overflow". The "persistent" bit is set,
     * providing a hint to the error handler to not retry. The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction. Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1011 = transaction failure */
}