/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
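/*
 * Worked example: at 16 ns per tick the generic timer runs at
 * 1e9 / GTIMER_SCALE == 62,500,000 ticks per second, i.e. 62.5MHz.
 */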

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
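
/*
 * For illustration: Hyp mode is the one place the two mappings differ,
 * because Hyp banks its own R13 and SPSR but shares R14 with USR/SYS:
 *
 *     bank_number(ARM_CPU_MODE_HYP);      // BANK_HYP
 *     r14_bank_number(ARM_CPU_MODE_HYP);  // BANK_USRSYS
 */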

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
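
/*
 * A minimal usage sketch for update_spsel(): the MSR SPSel handler can
 * pass the immediate straight through; only bit 0 is deposited, and a
 * write of the current value returns early without touching xregs[31]:
 *
 *     update_spsel(env, 1);   // select SP_ELx, stashing SP_EL0 first
 *     update_spsel(env, 1);   // no-op: PSTATE.SP is already set
 */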

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
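
/*
 * Worked example: a level-1 translation fault in domain 5 encodes as
 * 0x5 | (5 << 4) == 0x55, while a synchronous external abort with
 * fi->ea set (domain 0) yields 0x8 | (1 << 12) == 0x1008.
 */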

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we also fill in the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
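
/*
 * Worked example: a level-2 permission fault encodes as
 * 0b001100 | 2 == 0xe, and with LPAE bit 9 ORed in the
 * function returns 0x20e.
 */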

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
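
/*
 * A minimal usage sketch (hypothetical caller): because regime_tcr()
 * already merges VSTCR_EL2 into VTCR_EL2 format for Secure stage 2,
 * callers can extract fields uniformly, e.g. T0SZ in bits [5:0]:
 *
 *     uint64_t tcr = regime_tcr(env, mmu_idx);
 *     int t0sz = extract64(tcr, 0, 6);
 */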

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
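
/*
 * For illustration: with the relevant CPACR field programmed to 0b01
 * ("privileged access only"), only privileged code passes:
 *
 *     v7m_cpacr_pass(env, is_secure, true);   // true
 *     v7m_cpacr_pass(env, is_secure, false);  // false
 */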

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
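
/*
 * For example, a PSR with mode bits 0b10011 (Supervisor) maps to
 * entry 3 of the table:
 *
 *     aarch32_mode_name(0x13);  // "svc"
 */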

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
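
/*
 * Worked example: arm_granule_bits(Gran16K) == 14, matching a
 * 1 << 14 == 16KB page size.
 */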

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr_with_secure: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: security state for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type,
                               ARMMMUIdx mmu_idx, bool is_secure,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similarly, but use the security regime of @mmu_idx.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS 6

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
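
/*
 * Worked example (hypothetical pointer value):
 *
 *     uint64_t p = address_with_allocation_tag(0x0000aaaacafe0000ull, 0x3);
 *     // p == 0x0300aaaacafe0000ull, and allocation_tag_from_addr(p) == 0x3
 */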

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
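
/*
 * For illustration (user-only, hypothetical tagged pointer): with bit 55
 * clear the sign extension is zeroes, so the top byte is masked off:
 *
 *     useronly_clean_ptr(0x1200aaaacafe0000ull);  // 0x0000aaaacafe0000ull
 */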

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
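
/*
 * Worked example: with PMCR.N == 4 event counters the mask is
 * (1 << 31) | 0xf == 0x8000000f, i.e. the cycle counter bit C
 * plus event counters 0..3.
 */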

#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
#endif

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP \
    ((1 << (1 - 1)) | (1 << (2 - 1)) | \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
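
/*
 * Expanded, this sets bits 0, 1, 3, 7 and 15 (0x808b), marking the
 * power-of-2 vector quanta 1, 2, 4, 8 and 16.
 */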

#endif