target/arm/op_helper.c

/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
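
/*
 * Illustrative note (not in the upstream file): with a single 64-bit
 * table register, maxindex is 8. An index word ireg = 0x03020100 then
 * places table bytes 0..3 in result bytes 0..3, while any index byte
 * that is >= 8 copies the corresponding byte of 'def' instead.
 */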

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
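
/*
 * Illustrative note (not in the upstream file): the no-ISS form is also
 * used when a stage-1 page table walk faults on stage 2 (s1ptw), so a
 * template syndrome with ISV set still yields a no-ISS syndrome there.
 */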

void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}
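
/*
 * Illustrative note (not in the upstream file): with a stack limit of
 * 0x20001000, an SP update to 0x20000ff8 computed by translated code
 * fails the newvalue < limit check above and raises EXCP_STKOF.
 */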

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}
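
/*
 * Illustrative note (not in the upstream file): for a = b = 0x40000000
 * the sum is 0x80000000, so (res ^ a) has the sign bit set while
 * (a ^ b) does not: a positive overflow, and QF is latched.
 */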

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
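
/*
 * Illustrative note (not in the upstream file): do_ssat(env, 200, 7)
 * saturates into the signed range [-128, 127] and returns 127, while
 * do_usat(env, -5, 8) returns 0 and do_usat(env, 300, 8) returns 255;
 * each saturating case also sets QF.
 */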

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
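
/*
 * Illustrative note (not in the upstream file): usat16 with
 * x = 0x7fff8000 and shift = 8 saturates the low halfword (-32768)
 * to 0 and the high halfword (32767) to 255, giving 0x00ff0000.
 */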

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist, the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
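
/*
 * Illustrative note (not in the upstream file): the checks above run in
 * priority order, so on a v8 CPU an EL0 WFI with SCTLR_EL1.nTWI clear
 * traps to EL1 even when HCR_EL2.TWI or SCR_EL3.TWI is also set.
 */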

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to the top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}
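
/*
 * Illustrative note (not in the upstream file): a return to Thumb state
 * with regs[15] = 0x8001 is masked with ~1 to give 0x8000, while a
 * return to ARM state masks with ~3 to clear both low bits.
 */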

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     * -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     * -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     * -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}
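
/*
 * Illustrative note (not in the upstream file): for a one-byte access
 * in BE32 mode whose address was adjusted to 0x1003, the XOR above
 * recovers the original address 0x1000 (0x1003 ^ 3), so the watchpoint
 * comparison sees the unadjusted address.
 */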

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
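
/*
 * Illustrative note (not in the upstream file): shl_cc(env, 0x80000001, 1)
 * shifts bit 31 out into the carry flag (CF = 1) and returns 0x00000002.
 */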

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
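
/*
 * Illustrative note (not in the upstream file): ror_cc(env, 0x80000001, 1)
 * rotates to 0xc0000000 and sets CF from the last bit rotated out
 * (bit 0 of the input, here 1).
 */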