/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, i.e. the number of ns per tick.
 * This gives a 62.5 MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

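/*
 * Example (illustrative): the FIELD() macros from "hw/registerfields.h"
 * generate R_<REG>_<FIELD>_{SHIFT,LENGTH,MASK} constants and pair with
 * the FIELD_EX32()/FIELD_DP32() accessors, so the payload bits above can
 * be read and written like this (hypothetical local variable):
 *
 *     uint32_t excret = ...;   // v7M exception return payload
 *     bool spsel = FIELD_EX32(excret, V7M_EXCRET, SPSEL);
 *     excret = FIELD_DP32(excret, V7M_EXCRET, SPSEL, 0);  // clear SPSEL
 */
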
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPSel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error. */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}

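/*
 * Worked example (illustrative): a CPU whose ID_AA64MMFR0_EL1.PARange
 * field reads as 5 maps via pamax_map[] to 48-bit physical addresses
 * (a 256TB physical address space), so arm_pamax(cpu) returns 48.
 */
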
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Valid Syndrome Register EC field values */
enum arm_exception_class {
    EC_UNCATEGORIZED          = 0x00,
    EC_WFX_TRAP               = 0x01,
    EC_CP15RTTRAP             = 0x03,
    EC_CP15RRTTRAP            = 0x04,
    EC_CP14RTTRAP             = 0x05,
    EC_CP14DTTRAP             = 0x06,
    EC_ADVSIMDFPACCESSTRAP    = 0x07,
    EC_FPIDTRAP               = 0x08,
    EC_CP14RRTTRAP            = 0x0c,
    EC_ILLEGALSTATE           = 0x0e,
    EC_AA32_SVC               = 0x11,
    EC_AA32_HVC               = 0x12,
    EC_AA32_SMC               = 0x13,
    EC_AA64_SVC               = 0x15,
    EC_AA64_HVC               = 0x16,
    EC_AA64_SMC               = 0x17,
    EC_SYSTEMREGISTERTRAP     = 0x18,
    EC_SVEACCESSTRAP          = 0x19,
    EC_INSNABORT              = 0x20,
    EC_INSNABORT_SAME_EL      = 0x21,
    EC_PCALIGNMENT            = 0x22,
    EC_DATAABORT              = 0x24,
    EC_DATAABORT_SAME_EL      = 0x25,
    EC_SPALIGNMENT            = 0x26,
    EC_AA32_FPTRAP            = 0x28,
    EC_AA64_FPTRAP            = 0x2c,
    EC_SERROR                 = 0x2f,
    EC_BREAKPOINT             = 0x30,
    EC_BREAKPOINT_SAME_EL     = 0x31,
    EC_SOFTWARESTEP           = 0x32,
    EC_SOFTWARESTEP_SAME_EL   = 0x33,
    EC_WATCHPOINT             = 0x34,
    EC_WATCHPOINT_SAME_EL     = 0x35,
    EC_AA32_BKPT              = 0x38,
    EC_VECTORCATCH            = 0x3a,
    EC_AA64_BKPT              = 0x3c,
};

#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)

static inline uint32_t syn_get_ec(uint32_t syn)
{
    return syn >> ARM_EL_EC_SHIFT;
}

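/*
 * Usage sketch (illustrative): syn_get_ec() pairs with the syn_*()
 * constructors below, recovering the exception class from a syndrome:
 *
 *     uint32_t syndrome = syn_aa64_svc(0);
 *     assert(syn_get_ec(syndrome) == EC_AA64_SVC);
 */
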
/* Utility functions for constructing various kinds of syndrome value.
 * Note that in general we follow the AArch64 syndrome values; in a
 * few cases the value in HSR for exceptions taken to AArch32 Hyp
 * mode differs slightly, so if we ever implemented Hyp mode then the
 * syndrome value would need some massaging on exception entry.
 * (One example of this is that AArch64 defaults to IL bit set for
 * exceptions which don't specifically indicate information about the
 * trapping instruction, whereas AArch32 defaults to IL bit clear.)
 */
static inline uint32_t syn_uncategorized(void)
{
    return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
    return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
    return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_smc(void)
{
    return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
    return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
                                           int crn, int crm, int rt,
                                           int isread)
{
    return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
        | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
        | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20);
}

static inline uint32_t syn_sve_access_trap(void)
{
    return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}

static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
    return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}

static inline uint32_t syn_data_abort_no_iss(int same_el,
                                             int ea, int cm, int s1ptw,
                                             int wnr, int fsc)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL
        | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

static inline uint32_t syn_data_abort_with_iss(int same_el,
                                               int sas, int sse, int srt,
                                               int sf, int ar,
                                               int ea, int cm, int s1ptw,
                                               int wnr, int fsc,
                                               bool is_16bit)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
        | (sf << 15) | (ar << 14)
        | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
    return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}

static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
    return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}

static inline uint32_t syn_breakpoint(int same_el)
{
    return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | 0x22;
}

static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
    return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
           (is_16bit ? 0 : ARM_EL_IL) |
           (cv << 24) | (cond << 20) | ti;
}

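/*
 * Note (illustrative): in the abort, step, watchpoint and breakpoint
 * constructors above, a non-zero same_el ORs 1 into the EC field,
 * converting e.g. EC_DATAABORT (0x24) into EC_DATAABORT_SAME_EL (0x25):
 *
 *     uint32_t s = syn_data_abort_no_iss(1, 0, 0, 0, 1, 0x7);
 *     assert(syn_get_ec(s) == EC_DATAABORT_SAME_EL);
 */
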
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#ifdef CONFIG_USER_ONLY
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

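/*
 * Worked example (illustrative): a first-level translation fault in
 * domain 3 yields FS = 0b00101 with the domain in bits [7:4]:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation,
 *                            .level = 1, .domain = 3 };
 *     assert(arm_fi_to_sfsc(&fi) == 0x35);  // 0x5 | (3 << 4)
 */
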
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

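/*
 * Worked example (illustrative): a level-3 translation fault encodes
 * as STATUS = 0b000111 with the LPAE bit (bit 9) also set:
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 3 };
 *     assert(arm_fi_to_lfsc(&fi) == 0x207);  // 0x7 | (1 << 9)
 */
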
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

/* Do a page table walk and add page to TLB if possible */
bool arm_tlb_fill(CPUState *cpu, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_S1E2:
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3.
 * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits.
 */
#define MEMOPIDX_SHIFT 8

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

#endif