/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC 1
#define BANK_ABT 2
#define BANK_UND 3
#define BANK_IRQ 4
#define BANK_FIQ 5
#define BANK_HYP 6
#define BANK_MON 7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, i.e. number of ns per tick.
 * This gives a 62.5MHz timer: 10^9 ns/s divided by 16 ns/tick.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
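
/*
 * As a worked illustration (hypothetical value, decoded with FIELD_EX32()
 * from hw/registerfields.h): for the common EXC_RETURN value 0xfffffffd,
 *
 *     FIELD_EX32(0xfffffffd, V7M_EXCRET, MODE)  == 1  (return to Thread mode)
 *     FIELD_EX32(0xfffffffd, V7M_EXCRET, SPSEL) == 1  (use the process stack)
 */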

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjmp back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);
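
/*
 * A minimal usage sketch (following the common helper pattern;
 * syn_uncategorized() comes from syndrome.h and exception_target_el()
 * is defined later in this header):
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 */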

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

/**
 * aarch64_sve_zcr_get_valid_len:
 * @cpu: cpu context
 * @start_len: maximum len to consider
 *
 * Return the maximum supported SVE vector length <= @start_len.
 * Note that both @start_len and the return value are in units
 * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
 */
uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);
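
/*
 * For example (an illustrative configuration, not any particular CPU):
 * if the supported vector lengths are 128, 256 and 512 bits (LEN values
 * 0, 1 and 3), then aarch64_sve_zcr_get_valid_len(cpu, 5) returns 3,
 * the largest supported LEN value <= 5.
 */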

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation-defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error. */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz : 8;
    unsigned select : 1;
    bool tbi : 1;
    bool epd : 1;
    bool hpd : 1;
    bool using16k : 1;
    bool using64k : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);
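
/*
 * As an illustration (assuming a simple TCR_EL1 setup): with T0SZ == 25,
 * a low-half VA yields { .tsz = 25, .select = 0, ... }, i.e. an input
 * address range of 2^(64 - 25) = 2^39 bytes.
 */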

static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));
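
/*
 * A minimal caller sketch (hypothetical local variables; get_phys_addr()
 * returns true and fills in *fi when the translation faults):
 *
 *     hwaddr phys;
 *     target_ulong page_size;
 *     int prot;
 *     MemTxAttrs attrs = {};
 *     ARMMMUFaultInfo fi = {};
 *     ARMCacheAttrs cacheattrs = {};
 *
 *     if (get_phys_addr(env, addr, MMU_DATA_LOAD, arm_mmu_idx(env),
 *                       &phys, &attrs, &prot, &page_size,
 *                       &fi, &cacheattrs)) {
 *         ... translation failed; fi describes the fault ...
 *     }
 */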

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS 6

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
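
/*
 * Illustrative round-trip (an arbitrary made-up address): the tag lives
 * in bits [59:56], so
 *
 *     address_with_allocation_tag(0x00000000ffff0000ull, 0xa)
 *
 * yields 0x0a00000of... 0x0a000000ffff0000, and allocation_tag_from_addr()
 * applied to that value returns 0xa again.
 */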

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
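
/*
 * A worked check of the fold above (illustrative values): for a high-half
 * pointer, bit55 == 1, and the all-ones tag 0xf gives (0xf + 1) & 0xf == 0,
 * a match; for a low-half pointer, bit55 == 0, and only tag 0x0 gives
 * (0x0 + 0) & 0xf == 0.
 */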

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /*
     * TBI0 is known to be enabled, while TBI1 is disabled.
     * Sign-extending bit 55 gives an all-zeroes top byte for low
     * (user-half) addresses, so the AND strips the tag, and an
     * all-ones top byte for high addresses, which leaves ptr intact.
     */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX 0x10
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRP 0x2
#define PMCRE 0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
#define PMXEVTYPER_NSK 0x20000000
#define PMXEVTYPER_NSU 0x10000000
#define PMXEVTYPER_NSH 0x08000000
#define PMXEVTYPER_M 0x04000000
#define PMXEVTYPER_MT 0x02000000
#define PMXEVTYPER_EVTCOUNT 0x0000ffff
#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                         PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                         PMXEVTYPER_M | PMXEVTYPER_MT | \
                         PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR 0xf8000000
#define PMCCFILTR_M PMXEVTYPER_M
#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
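
/*
 * For instance (illustrative value): on a CPU whose PMCR.N field reads
 * as 4, pmu_counter_mask() returns (1 << 31) | 0xf: the cycle counter
 * bit plus one bit for each of the four implemented event counters.
 */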

#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
#endif

#endif