/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
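
/* Worked example (illustrative values, not from the source): with
 * maxindex = 8 and table bytes 0x0706050403020100 (byte i holds the
 * value i), ireg = 0x04ff0100 selects bytes 4, <out of range>, 1, 0;
 * with def = 0xdddddddd the result is 0x04dd0100: each in-range index
 * fetches a table byte, each out-of-range index keeps def's byte.
 */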

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    0, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      0, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
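
/* For example, a data abort routed to EL1 (target_el != 2) always takes
 * the no-ISS branch above, so ISV is reported as 0 to the guest; the
 * register-transfer ISS fields are only merged in for an EL2-routed
 * abort whose template syndrome already has ISV set.
 */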

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, the function was called from C code (i.e. not from generated
 * code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (access_type == MMU_INST_FETCH) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                       same_el, fi.s1ptw,
                                       access_type == MMU_DATA_STORE, syn);
            if (access_type == MMU_DATA_STORE
                && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = (1 << 9) | 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
        env->exception.fsr |= (1 << 11);
    }

    syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                               same_el, 0, access_type == MMU_DATA_STORE,
                               0x21);
    raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
}

#endif /* !defined(CONFIG_USER_ONLY) */

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
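
/* Worked example (illustrative values): add_saturate(env, 0x7fffffff, 1)
 * overflows, so QF is set and the result is forced to
 * ~(((int32_t)a >> 31) ^ SIGNBIT), which is INT32_MAX (0x7fffffff) when
 * a is non-negative and INT32_MIN (0x80000000) when a is negative;
 * likewise sub_saturate(env, 0x80000000, 1) saturates to 0x80000000.
 */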

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}
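
/* Worked example (illustrative values): doubling saturates exactly when
 * |val| >= 2^30, e.g. double_saturate(env, 0x50000000) yields 0x7fffffff
 * with QF set, while 0x10000000 simply doubles to 0x20000000.
 */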

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
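
/* Worked example (illustrative values): do_ssat() clamps to the
 * (shift + 1)-bit signed range and do_usat() to the shift-bit unsigned
 * range. With shift = 7, do_ssat(env, 300, 7) returns 0x7f (127) and
 * do_ssat(env, -300, 7) returns 0xffffff80 (-128), both setting QF;
 * with shift = 8, do_usat(env, 300, 8) returns 255 and
 * do_usat(env, -5, 8) returns 0.
 */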

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
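
/* Worked example (illustrative values): ssat16(env, 0x7fff0040, 7)
 * saturates each halfword independently: the low half (64) is already
 * in range and passes through, while the high half (32767) clamps to
 * 0x7f, giving 0x007f0040 with QF set.
 */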

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Check whether WFx (WFI/WFE) instructions are set up to be trapped.
 * Returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise returns 0, indicating the instruction is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 when EL3 is AArch32 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
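
/* For example (illustrative, assuming a v8 CPU in Non-secure EL0):
 * a WFI with SCTLR_EL1.nTWI clear traps to EL1; otherwise, if
 * HCR_EL2.TWI is set it traps to EL2, then SCR_EL3.TWI is checked for
 * EL3, and if none of these apply check_wfx_trap() returns 0.
 */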

void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    arm_call_el_change_hook(arm_env_get_cpu(env));
}

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
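
/* For example (illustrative values): the 4-bit immediate maps onto
 * PSTATE.{D,A,I,F} once shifted left by 6, so "MSR DAIFSet, #3"
 * (imm = 0x3) sets the I and F mask bits, and "MSR DAIFClear, #3"
 * clears them again.
 */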

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
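
/* For example (illustrative values): an AArch64 SPSR with M[3:0] = 0x5
 * (EL1h) decodes to EL1 and M[3:0] = 0x0 (EL0t) to EL0, while the
 * reserved encoding M[3:0] = 0x1 (EL0 with SP_EL1 selected) yields -1
 * and is treated as an illegal return.
 */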

void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    arm_call_el_change_hook(arm_env_get_cpu(env));

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
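
/* Worked example (illustrative values): shl_cc(env, 0x80000001, 1)
 * returns 0x00000002 with CF = 1 (the last bit shifted out was bit 31);
 * a shift of exactly 32 returns 0 with CF = bit 0 of x, and any larger
 * shift returns 0 with CF = 0.
 */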

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
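
/* Worked example (illustrative values): ror_cc(env, 0x00000001, 1)
 * rotates bit 0 into bit 31, returning 0x80000000 with CF = 1; a rotate
 * amount that is a non-zero multiple of 32 returns x unchanged but
 * still sets CF to bit 31 of x.
 */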