/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

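/* Raise an architectural (guest-visible) exception: record the syndrome and
 * target EL in the CPU state and exit back to the main loop via
 * cpu_loop_exit().
 */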
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

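/* Table lookup for the Neon VTBL/VTBX instructions: for each byte of ireg,
 * look that index up in the table at vn (maxindex bytes long); out-of-range
 * indexes take the corresponding byte of def instead.
 */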
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

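/* Saturating arithmetic helpers. On overflow the result saturates and the
 * sticky Q flag (env->QF) is set; add_setq only sets QF and returns the
 * unsaturated sum.
 */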
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

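/* Used by SETEND: flip the CPSR.E (data endianness) bit. */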
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

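/* WFI: if there is no work already pending, either take the configured WFx
 * trap or halt the CPU until it is woken by an interrupt.
 */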
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

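/* The next two helpers implement the AArch32 MSR/MRS (banked register) forms:
 * write or read a register belonging to the banked set of the target mode.
 */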
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

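/* Run the access checks for a coprocessor/system register access that could
 * not be fully resolved at translate time, raising the appropriate exception
 * if the access is denied.
 */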
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

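/* Read/write helpers for registers that go through a C readfn/writefn;
 * registers marked ARM_CP_IO may touch device state, so the I/O thread lock
 * is held around the callback for those.
 */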
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- we'll catch the SMC exception and
     * implement the PSCI call behaviour there.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

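/* Handle an AArch64 exception return (ERET): validate the saved SPSR, restore
 * PSTATE or CPSR and the PC from ELR_ELx, and drop to the exception level
 * being returned to, applying the architected illegal-return behaviour if
 * the return is not permitted.
 */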
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

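/* Return true if breakpoint or watchpoint n (selected by is_wp) matches,
 * checking the control register fields that BCR and WCR share.
 */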
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal(env, EXCP_DEBUG));
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}