/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/log.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/helper-proto.h"
24 #include "internals.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27
28 #define SIGNBIT (uint32_t)0x80000000
29 #define SIGNBIT64 ((uint64_t)1 << 63)
30
31 static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
32 uint32_t syndrome, uint32_t target_el)
33 {
34 CPUState *cs = CPU(arm_env_get_cpu(env));
35
36 if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
37 /*
38 * Redirect NS EL1 exceptions to NS EL2. These are reported with
39 * their original syndrome register value, with the exception of
40 * SIMD/FP access traps, which are reported as uncategorized
41 * (see DDI0478C.a D1.10.4)
42 */
43 target_el = 2;
44 if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
45 syndrome = syn_uncategorized();
46 }
47 }
48
49 assert(!excp_is_internal(excp));
50 cs->exception_index = excp;
51 env->exception.syndrome = syndrome;
52 env->exception.target_el = target_el;
53
54 return cs;
55 }
56
57 void raise_exception(CPUARMState *env, uint32_t excp,
58 uint32_t syndrome, uint32_t target_el)
59 {
60 CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
61 cpu_loop_exit(cs);
62 }
63
64 void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
65 uint32_t target_el, uintptr_t ra)
66 {
67 CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
68 cpu_loop_exit_restore(cs, ra);
69 }
70
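/*
 * Editorial note: the "ra" argument above is the host return address into
 * the translated code block, typically obtained via GETPC() in the caller.
 * cpu_loop_exit_restore() uses it to unwind and resolve the guest PC and
 * CPU state before the exception is delivered, so callers of
 * raise_exception_ra() need not sync state themselves first.
 */
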
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

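/*
 * Editorial worked example: each byte of "ireg" is an index into the byte
 * table at "vn", which holds "maxindex" valid bytes. With ireg = 0x03020100
 * and maxindex = 2, bytes 0 and 1 are looked up in the table, while indices
 * 2 and 3 are out of range and taken from the corresponding bytes of "def",
 * matching the AArch32 VTBL/VTBX out-of-range behaviour (translate code
 * passes zero or the old destination value as "def" respectively).
 */
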
#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);

        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);

        deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = CPU(arm_env_get_cpu(env));

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

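/*
 * Editorial worked example: for the signed saturating helpers above,
 * 0x7fffffff + 1 overflows, so QF is set and the result saturates to
 * ~(((int32_t)0x7fffffff >> 31) ^ SIGNBIT) = 0x7fffffff; likewise
 * 0x80000000 - 1 saturates to 0x80000000. The unsigned variants clamp
 * to ~0 on addition wrap-around and to 0 on subtraction borrow.
 */
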
/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

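/*
 * Editorial worked example: with shift = 7, do_ssat() clamps to the signed
 * 8-bit range: val = 300 gives top = 300 >> 7 = 2 > 0, so QF is set and the
 * result is mask = 0x7f (127); val = -300 gives top = -3 < -1, so the result
 * is ~mask = 0xffffff80 (-128). do_usat() with shift = 8 clamps to [0, 255].
 */
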
/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check, as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

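/*
 * Editorial note: the checks above are applied in priority order: an EL0
 * SCTLR_ELx.nTWE/nTWI trap to EL1 (or to EL3 for Secure PL1 under an
 * AArch32 EL3) is taken first, then an HCR_EL2.TWE/TWI trap to EL2, and
 * finally an SCR_EL3.TWE/TWI trap to EL3.
 */
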
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (i.e. halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}

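/*
 * Editorial worked example: after an exception return with a target PC of
 * 0x8003, returning to Thumb state masks with ~1 to give 0x8002 (bit 0 is
 * the interworking bit), while returning to ARM state masks with ~3 to
 * give 0x8000, since ARM instructions are word-aligned.
 */
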
/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

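/*
 * Editorial note: the four register accessors below share one pattern:
 * registers marked ARM_CP_IO may touch device or timer state shared
 * between vCPUs, so their read/write hooks must run under the iothread
 * mutex (the "big QEMU lock"); plain registers only touch per-CPU state
 * and can skip the locking.
 */
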
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     * -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     * -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     * -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

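/*
 * Editorial worked example: in BE32 mode a one-byte access to guest address
 * 0x1005 is performed on host address 0x1005 ^ 3 = 0x1006, so XORing with 3
 * again recovers 0x1005 for the watchpoint comparison; halfword accesses
 * are swapped within the word by XOR with 2 in the same way.
 */
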
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

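/*
 * Editorial worked example: these helpers implement the AArch32
 * shift-by-register semantics, where the shift amount is the bottom byte
 * of the register and can exceed 31. For instance, LSL by exactly 32
 * yields a result of 0 with the carry flag set to bit 0 of the input,
 * while LSL by more than 32 yields 0 with carry 0, as shl_cc below makes
 * explicit.
 */
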
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}