/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
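
/*
 * Worked example (for illustration only): an exception taken from EL0
 * resolves to EL1 here (MAX(1, 0) == 1), unless we are Secure with an
 * AArch32 EL3, in which case there is no Secure EL1 and EL3 is
 * returned instead.
 */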

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}
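
/*
 * Illustration of the desc encoding above (derived from the code, not
 * part of the original source): desc packs the base register and table
 * length as (base_reg << 2) | (nregs - 1), so a two-register table has
 * (desc & 3) == 1 and maxindex == 16. Index bytes beyond the table
 * select the corresponding byte of 'def' instead; architecturally that
 * fallback is zero for VTBL and the old destination value for VTBX,
 * which we assume the caller supplies accordingly.
 */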

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}
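
/*
 * Worked example of the Q-flag test above: overflow is flagged when
 * the operands have the same sign but the result's sign differs.
 * E.g. 0x7fffffff + 1 = 0x80000000 sets QF, while 0x7fffffff + (-1)
 * leaves it untouched.
 */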

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
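
/*
 * For illustration: do_ssat(env, 300, 7) clamps to [-128, 127];
 * top = 300 >> 7 = 2 > 0, so QF is set and mask == 127 is returned.
 * Symmetrically, do_ssat(env, -300, 7) has top == -3 < -1 and
 * returns ~mask == -128.
 */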

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
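
/*
 * Likewise do_usat(env, 300, 8) saturates to max == 255 and sets QF,
 * and any negative input saturates to 0.
 */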

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check, as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target el.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags. */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}
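
/*
 * For illustration of the masking above: a return to Thumb state
 * clears only bit 0 of the new PC (halfword alignment), while a
 * return to ARM state clears bits [1:0] (word alignment).
 */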

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
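
/*
 * For illustration: an MSR banked access targeting r13_usr from System
 * mode falls into the regno 13 check above and UNDEFs, because User
 * and System modes share SP; likewise SPSR_hyp and r13_hyp are only
 * accessible from Monitor mode.
 */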

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
                                        uint32_t syndrome, uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;

    assert(ri != NULL);

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_TRAP;
        goto fail;
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }

    /*
     * If the access function indicates a trap from EL0 to EL1 then
     * that always takes priority over the HSTR_EL2 trap. (If it indicates
     * a trap to EL3, then the HSTR_EL2 trap takes priority; if it indicates
     * a trap to EL2, then the syndrome is the same either way so we don't
     * care whether technically the architecture says that HSTR_EL2 trap or
     * the other trap takes priority. So we take the "check HSTR_EL2" path
     * for all of those cases.)
     */
    if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) == 0) &&
        arm_current_el(env) == 0) {
        goto fail;
    }

    /*
     * HSTR_EL2 traps from EL1 are checked earlier, in generated code;
     * we only need to check here for traps from EL0.
     */
    if (!is_a64(env) && arm_current_el(env) == 0 && ri->cp == 15 &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    /*
     * Fine-grained traps also are lower priority than undef-to-EL1,
     * higher priority than trap-to-EL3, and we don't care about priority
     * order with other EL2 traps because the syndrome value is the same.
     */
    if (arm_fgt_active(env, arm_current_el(env))) {
        uint64_t trapword = 0;
        unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
        unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
        bool rev = FIELD_EX32(ri->fgt, FGT, REV);
        bool trapbit;

        if (ri->fgt & FGT_EXEC) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_exec));
            trapword = env->cp15.fgt_exec[idx];
        } else if (isread && (ri->fgt & FGT_R)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_read));
            trapword = env->cp15.fgt_read[idx];
        } else if (!isread && (ri->fgt & FGT_W)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_write));
            trapword = env->cp15.fgt_write[idx];
        }

        trapbit = extract64(trapword, bitpos, 1);
        if (trapbit != rev) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (likely(res == CP_ACCESS_OK)) {
        return ri;
    }

 fail:
    switch (res & ~CP_ACCESS_EL_MASK) {
    case CP_ACCESS_TRAP:
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        /* Only CP_ACCESS_TRAP traps are direct to a specified EL */
        assert((res & CP_ACCESS_EL_MASK) == 0);
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        /* No "direct" traps to EL1 */
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);

    assert(ri != NULL);
    return ri;
}

/*
 * Test for HCR_EL2.TIDCP at EL1.
 * Since implementation defined registers are rare, and within QEMU
 * most of them are no-op, do not waste HFLAGS space for this and
 * always use a helper.
 */
void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
{
    if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
    }
}

/*
 * Similarly, for FEAT_TIDCP1 at EL0.
 * We have already checked for the presence of the feature.
 */
void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
    /* See arm_sctlr(), but we also need the sctlr el. */
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
    int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;

    /*
     * The bit is not valid unless the target el is aa64, but since the
     * bit test is simpler perform that first and check validity after.
     */
    if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
        && arm_el_is_aa64(env, target_el)) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
    }
}

void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     * -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     * -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     * -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
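
/*
 * For illustration: a rotate amount that is a non-zero multiple of 32
 * takes the (shift1 != 0, shift == 0) path above, leaving x unchanged
 * but still copying bit 31 into the carry flag; e.g. x = 0x80000000,
 * i = 32 returns 0x80000000 with CF set to 1.
 */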

void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}
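
/*
 * For illustration: in_page is the number of bytes from ptr to the end
 * of its page, so an access that does not fit is probed in two pieces,
 * one per page; e.g. a 16-byte access starting 8 bytes before a page
 * boundary probes 8 bytes in each page.
 */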

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff,
     * and will return HCR_EL2.VSE == 0, so nothing happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception. */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}