/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
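
/*
 * Worked arithmetic for the comment above: at 16 ns per tick the timer
 * frequency is 10^9 ns/s / 16 ns/tick = 62,500,000 ticks/s, i.e. 62.5MHz.
 */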

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using the v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
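
/*
 * Illustrative sketch (not code from this file): v7M/v8M exception return
 * and v8M function return work by branching to these magic address ranges,
 * so a branch target can be classified with ordered compares:
 *
 *     if (addr >= EXC_RETURN_MIN_MAGIC) {
 *         // handle as exception return payload
 *     } else if (addr >= FNC_RETURN_MIN_MAGIC) {
 *         // handle as v8M secure function return
 *     }
 */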

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
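
/*
 * Illustrative mapping (a sketch; the authoritative table is the
 * definition of arm_rmode_to_sf_map in the C file): each ARMFPRounding
 * value selects the corresponding softfloat rounding mode, e.g.
 * FPROUNDING_TIEEVEN -> float_round_nearest_even and
 * FPROUNDING_ZERO -> float_round_to_zero.
 */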

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
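
/*
 * Worked example (illustrative): a level-2 translation fault in domain 3
 * encodes as fsc = 0x7 | (3 << 4) = 0x37, matching the short-descriptor
 * FSR layout: FSC[3:0] in bits [3:0], domain in bits [7:4], FSC[4] in
 * bit 10.
 */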

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
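
/*
 * Worked example (illustrative): a level-3 permission fault encodes as
 * fsc = 0b001100 | 3 = 0xf; ORing in bit 9 (the LPAE format bit) gives
 * the final value 0x20f.
 */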

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always A-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
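
/*
 * Example (illustrative): a PSR of 0x13 (SVC mode) has low bits 0x3,
 * so aarch32_mode_name(0x13) returns "svc".
 */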

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
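
/*
 * Usage sketch (illustrative, not a definition from this header): a
 * CPSR/SPSR write path can apply the mask so that bits for features the
 * CPU does not implement are ignored:
 *
 *     mask &= aarch32_cpsr_valid_mask(env->features,
 *                                     &env_archcpu(env)->isar);
 */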

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
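
/*
 * Worked example: Gran16K yields 14 bits, i.e. a page size of
 * 1 << 14 = 16384 bytes; likewise Gran4K -> 4KB and Gran64K -> 64KB.
 */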

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *               (ignored if @mmu_idx is for a stage 1 regime; only affects
 *               tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr_with_secure: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: security state for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, @result may
 * not be filled in, and the populated @fi provides information on why the
 * translation aborted, in the format of a DFSR/IFSR fault register, with the
 * following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type,
                               ARMMMUIdx mmu_idx, bool is_secure,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similarly, but use the security regime of @mmu_idx.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS 6

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
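
/*
 * Worked example (illustrative): the allocation tag lives in pointer
 * bits [59:56], so address_with_allocation_tag(0x0000aaaabbbbccccull, 0x3)
 * yields 0x0300aaaabbbbccccull, and allocation_tag_from_addr() of that
 * result returns 0x3.
 */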

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
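
/*
 * Worked example for the fold above: with bit55 == 0 the test passes only
 * for ptr_tag == 0b0000 ((0 + 0) & 0xf == 0); with bit55 == 1 it passes
 * only for ptr_tag == 0b1111 ((0xf + 1) & 0xf == 0). Together these are
 * exactly the ptr<59:55> == 00000 and ptr<59:55> == 11111 cases.
 */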

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0,     /* No completed beats */
    ECI_A0 = 1,       /* Completed: A0 */
    ECI_A0A1 = 2,     /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4,   /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
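
/*
 * Worked example: with 4 event counters, pmu_counter_mask() returns
 * (1 << 31) | ((1 << 4) - 1) = 0x8000000f: bits [3:0] for the event
 * counters plus bit 31 for the cycle counter.
 */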

#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
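
/*
 * Worked example: with tsz = 25 and tbi = true, bot_pac_bit = 39 and
 * top_pac_bit = 56, so the PAC occupies bits [55:39] and the mask is
 * MAKE_64BIT_MASK(39, 17) = 0x00ffff8000000000.
 */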

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
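
/*
 * Worked example: the map sets bits 0, 1, 3, 7 and 15, i.e. the constant
 * 0x808b, covering the power-of-2 vector lengths of 1, 2, 4, 8 and 16
 * quadwords.
 */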

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen when EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps  (hw_watchpoints->len)
#define cur_hw_bps  (hw_breakpoints->len)
#define get_hw_bp(i)  (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)  (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
#endif