/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
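/* That is: 1e9 ns/s / 16 ns/tick == 62,500,000 ticks/s, the 62.5MHz above. */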

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
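
/*
 * Illustrative sketch (not from the original header): the FIELD()
 * definitions above are consumed via the hw/registerfields.h accessors,
 * e.g. to test bits of an EXC_RETURN value 'excret':
 *
 *   bool to_secure = FIELD_EX32(excret, V7M_EXCRET, S);
 *   bool use_psp = FIELD_EX32(excret, V7M_EXCRET, SPSEL);
 */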

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
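
/*
 * Worked example (derived from the two functions above): for Hyp mode,
 * bank_number(ARM_CPU_MODE_HYP) == BANK_HYP, used for banked_r13[] and
 * banked_spsr[], while r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS,
 * because Hyp shares its R14 with USR and SYS.
 */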

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const struct TranslationBlock *tb);
#endif /* CONFIG_TCG */


enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error. */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
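
/*
 * For example (from the pamax_map above): a PARANGE field of 5 in
 * ID_AA64MMFR0 corresponds to 48-bit physical addresses.
 */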

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Valid Syndrome Register EC field values */
enum arm_exception_class {
    EC_UNCATEGORIZED          = 0x00,
    EC_WFX_TRAP               = 0x01,
    EC_CP15RTTRAP             = 0x03,
    EC_CP15RRTTRAP            = 0x04,
    EC_CP14RTTRAP             = 0x05,
    EC_CP14DTTRAP             = 0x06,
    EC_ADVSIMDFPACCESSTRAP    = 0x07,
    EC_FPIDTRAP               = 0x08,
    EC_PACTRAP                = 0x09,
    EC_CP14RRTTRAP            = 0x0c,
    EC_BTITRAP                = 0x0d,
    EC_ILLEGALSTATE           = 0x0e,
    EC_AA32_SVC               = 0x11,
    EC_AA32_HVC               = 0x12,
    EC_AA32_SMC               = 0x13,
    EC_AA64_SVC               = 0x15,
    EC_AA64_HVC               = 0x16,
    EC_AA64_SMC               = 0x17,
    EC_SYSTEMREGISTERTRAP     = 0x18,
    EC_SVEACCESSTRAP          = 0x19,
    EC_INSNABORT              = 0x20,
    EC_INSNABORT_SAME_EL      = 0x21,
    EC_PCALIGNMENT            = 0x22,
    EC_DATAABORT              = 0x24,
    EC_DATAABORT_SAME_EL      = 0x25,
    EC_SPALIGNMENT            = 0x26,
    EC_AA32_FPTRAP            = 0x28,
    EC_AA64_FPTRAP            = 0x2c,
    EC_SERROR                 = 0x2f,
    EC_BREAKPOINT             = 0x30,
    EC_BREAKPOINT_SAME_EL     = 0x31,
    EC_SOFTWARESTEP           = 0x32,
    EC_SOFTWARESTEP_SAME_EL   = 0x33,
    EC_WATCHPOINT             = 0x34,
    EC_WATCHPOINT_SAME_EL     = 0x35,
    EC_AA32_BKPT              = 0x38,
    EC_VECTORCATCH            = 0x3a,
    EC_AA64_BKPT              = 0x3c,
};

#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)

static inline uint32_t syn_get_ec(uint32_t syn)
{
    return syn >> ARM_EL_EC_SHIFT;
}

/* Utility functions for constructing various kinds of syndrome value.
 * Note that in general we follow the AArch64 syndrome values; in a
 * few cases the value in HSR for exceptions taken to AArch32 Hyp
 * mode differs slightly, and we fix this up when populating HSR in
 * arm_cpu_do_interrupt_aarch32_hyp().
 * The exception is FP/SIMD access traps -- these report extra information
 * when taking an exception to AArch32. For those we include the extra coproc
 * and TA fields, and mask them out when taking the exception to AArch64.
 */
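/*
 * For illustration, the resulting layout: EC in bits [31:26], IL in
 * bit 25, ISS in bits [24:0]. So, for instance,
 * syn_aa64_svc(0x1234) == (EC_AA64_SVC << 26) | ARM_EL_IL | 0x1234.
 */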
static inline uint32_t syn_uncategorized(void)
{
    return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
    return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
    return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_smc(void)
{
    return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
    return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
                                           int crn, int crm, int rt,
                                           int isread)
{
    return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
        | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
        | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
    /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | 0xa;
}

static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
{
    /* AArch32 SIMD trap: TA == 1 coproc == 0 */
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (1 << 5);
}

static inline uint32_t syn_sve_access_trap(void)
{
    return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}

static inline uint32_t syn_pactrap(void)
{
    return EC_PACTRAP << ARM_EL_EC_SHIFT;
}

static inline uint32_t syn_btitrap(int btype)
{
    return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
}

static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
    return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}

static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
                                             int ea, int cm, int s1ptw,
                                             int wnr, int fsc)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL
        | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
        | (wnr << 6) | fsc;
}

static inline uint32_t syn_data_abort_with_iss(int same_el,
                                               int sas, int sse, int srt,
                                               int sf, int ar,
                                               int ea, int cm, int s1ptw,
                                               int wnr, int fsc,
                                               bool is_16bit)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
        | (sf << 15) | (ar << 14)
        | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
    return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}

static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
    return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}

static inline uint32_t syn_breakpoint(int same_el)
{
    return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | 0x22;
}

static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
    return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
           (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
           (cv << 24) | (cond << 20) | ti;
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
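
/*
 * Worked example (from the switch above): a level-1 translation fault
 * in domain d yields 0x5 | (d << 4), the classic short-format "section
 * translation fault" FSR value.
 */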

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
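
/*
 * Worked example (from the switch above): a level-2 translation fault
 * gives (2 & 3) | (0x1 << 2) == 0x6; "fsc |= 1 << 9" then sets the LPAE
 * bit, so the returned value is 0x206.
 */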

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
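
/*
 * For example: a PSR with mode field 0x13 (Supervisor) indexes entry 3
 * above and yields "svc"; unassigned encodings print as "???".
 */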

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS 6

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
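
/*
 * Illustrative sketch (not from the original header): callers assemble
 * such a descriptor with the hw/registerfields.h deposit macros, e.g.:
 *
 *   uint32_t desc = 0;
 *   desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *   desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 */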

bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
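
/*
 * For example: for ptr == 0x5A00000000001000, bits [59:56] are 0xA, so
 * allocation_tag_from_addr(ptr) == 0xA, and
 * address_with_allocation_tag(ptr, 0x3) == 0x5300000000001000.
 */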

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
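
/*
 * Worked example (of the fold above): with bit55 == 1 the "all ones"
 * case applies, and ptr_tag == 0xf gives ((0xf + 1) & 0xf) == 0, a
 * match; with bit55 == 0 only ptr_tag == 0 matches.
 */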

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
    /* TBI is known to be enabled. */
#ifdef CONFIG_USER_ONLY
    ptr = sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

#endif