/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
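/* Raise a guest-visible exception: record the syndrome and target exception
 * level in the CPU state and longjmp out to the cpu_exec() main loop.
 * Internal-to-QEMU exceptions must go via HELPER(exception_internal) instead.
 */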
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
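/* Return the exception level that an exception routed to "EL1 or above"
 * should actually target, given the current EL and security state.
 */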
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
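/* Table lookup for the Neon VTBL/VTBX instructions: each byte of the index
 * register selects a byte from the table registers if it is in range;
 * out-of-range indexes take the corresponding byte of 'def' instead (zero
 * for VTBL, the original destination value for VTBX).
 */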
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (is_write == 2) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = syn_data_abort_no_iss(same_el,
                                        0, 0, fi.s1ptw, is_write == 1, syn);
            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
                                 int is_user, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
        env->exception.fsr |= (1 << 11);
    }

    raise_exception(env, EXCP_DATA_ABORT,
                    syn_data_abort_no_iss(same_el,
                                          0, 0, 0, is_write == 1, 0x21),
                    target_el);
}

#endif /* !defined(CONFIG_USER_ONLY) */
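/* Arithmetic helpers for the saturating instructions. These set env->QF,
 * the sticky CPSR.Q flag, whenever a result saturates. The signed overflow
 * test '((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)' is true exactly when
 * the operands have the same sign but the result's sign differs, i.e. the
 * two's-complement overflow condition for addition (for subtraction the
 * operand signs must differ instead). On overflow the saturated result
 * expression yields 0x7fffffff for positive and 0x80000000 for negative
 * overflow.
 */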
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
/* Signed saturation. */
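/* do_ssat() clamps 'val' to the range of a (shift + 1)-bit signed integer,
 * [-2^shift, 2^shift - 1], and sets QF when it saturates: for example
 * do_ssat(env, 300, 7) returns 127 and do_ssat(env, -300, 7) returns
 * (uint32_t)-128. do_usat() likewise clamps to [0, 2^shift - 1].
 */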
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
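/* Flip CPSR.E, the data endianness bit. Since this helper toggles the bit
 * rather than setting it, the translator should only call it for a SETEND
 * that actually changes the current endianness.
 */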
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
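        /* The translator has already advanced the PC past the WFI insn;
         * wind it back so the trap is taken with the WFI itself as the
         * faulting instruction.
         */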
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
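/* CPSR reads via MRS mask out the execution state bits (IT, J, T) and the
 * reserved bits, which the guest cannot observe this way.
 */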
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
}
/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
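/* Runtime access check for a system register: handle the XScale CPAR
 * coprocessor-access controls, then run the register's accessfn (if any)
 * and raise whatever exception it demands, routed to the appropriate
 * exception level.
 */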
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     * 1. debug exceptions are currently disabled
     * 2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr;
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */
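/* These helpers compute a shift by a register-specified amount and update
 * CF with the last bit shifted out, following the ARM semantics for
 * register-controlled shifts: only the bottom byte of the shift count is
 * significant. For example, shl_cc(env, 0x80000001, 1) returns 2 with
 * CF = 1.
 */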
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}