/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

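/*
 * Illustrative sketch (not part of the original file): callers rely on the
 * write-then-readback contract above. For a hypothetical reginfo "ri"
 * describing a read-only or ARM_CP_CONST register, a caller would observe:
 *
 *     write_raw_cp_reg(env, ri, 0x1234);
 *     if (read_raw_cp_reg(env, ri) != 0x1234) {
 *         // the write was silently ignored
 *     }
 */
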
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

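/*
 * Illustrative note (assumption about callers outside this excerpt): these
 * two helpers are used as a pair around migration. The source side calls
 * write_cpustate_to_list(cpu, false) to fill cpreg_values[], and the
 * destination calls write_list_to_cpustate(cpu) to apply them, relying on
 * the readback check above to detect values this CPU model cannot accept.
 */
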
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

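/*
 * Illustrative sketch (assumption, mirroring the reginfo arrays elsewhere in
 * this file): an access function such as access_tpm is attached to a register
 * definition via the .accessfn field, e.g.
 *
 *     { .name = "PMINTENSET", ..., .access = PL1_RW,
 *       .accessfn = access_tpm, ... },
 *
 * and is consulted on every guest access, with lower-EL traps checked before
 * higher-EL ones.
 */
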
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

594 /* NB: Some of these registers exist in v8 but with more precise
595 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
596 */
597 /* MMU Domain access control / MPU write buffer control */
0c17d68c
FA
598 { .name = "DACR",
599 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
84929218 600 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
0c17d68c
FA
601 .writefn = dacr_write, .raw_writefn = raw_write,
602 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
603 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
a903c449
EI
604 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
605 * For v6 and v5, these mappings are overly broad.
4fdd17dd 606 */
a903c449
EI
607 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
608 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
609 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
610 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
611 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
612 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
613 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
4fdd17dd 614 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
c4804214
PM
615 /* Cache maintenance ops; some of this space may be overridden later. */
616 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
617 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
618 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
e9aa6c21
PM
619};
620
7d57f408
PM
621static const ARMCPRegInfo not_v6_cp_reginfo[] = {
622 /* Not all pre-v6 cores implemented this WFI, so this is slightly
623 * over-broad.
624 */
625 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
626 .access = PL1_W, .type = ARM_CP_WFI },
7d57f408
PM
627};
628
629static const ARMCPRegInfo not_v7_cp_reginfo[] = {
630 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
631 * is UNPREDICTABLE; we choose to NOP as most implementations do).
632 */
633 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
634 .access = PL1_W, .type = ARM_CP_WFI },
34f90529
PM
635 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
636 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
637 * OMAPCP will override this space.
638 */
639 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
640 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
641 .resetvalue = 0 },
642 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
643 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
644 .resetvalue = 0 },
776d4e5c
PM
645 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
646 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
7a0e58fa 647 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 648 .resetvalue = 0 },
50300698
PM
649 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
650 * implementing it as RAZ means the "debug architecture version" bits
651 * will read as a reserved value, which should cause Linux to not try
652 * to use the debug hardware.
653 */
654 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
655 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
995939a6
PM
656 /* MMU TLB control. Note that the wildcarding means we cover not just
657 * the unified TLB ops but also the dside/iside/inner-shareable variants.
658 */
659 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
660 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
7a0e58fa 661 .type = ARM_CP_NO_RAW },
995939a6
PM
662 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
663 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
7a0e58fa 664 .type = ARM_CP_NO_RAW },
995939a6
PM
665 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
666 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
7a0e58fa 667 .type = ARM_CP_NO_RAW },
995939a6
PM
668 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
669 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
7a0e58fa 670 .type = ARM_CP_NO_RAW },
a903c449
EI
671 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
672 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
673 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
674 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
7d57f408
PM
675};
676
c4241c7d
PM
677static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
678 uint64_t value)
2771db27 679{
f0aff255
FA
680 uint32_t mask = 0;
681
682 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
683 if (!arm_feature(env, ARM_FEATURE_V8)) {
684 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
685 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
686 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
687 */
7fbc6a40 688 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
f0aff255 689 /* VFP coprocessor: cp10 & cp11 [23:20] */
fab8ad39
RH
690 mask |= R_CPACR_ASEDIS_MASK |
691 R_CPACR_D32DIS_MASK |
692 R_CPACR_CP11_MASK |
693 R_CPACR_CP10_MASK;
f0aff255
FA
694
695 if (!arm_feature(env, ARM_FEATURE_NEON)) {
696 /* ASEDIS [31] bit is RAO/WI */
fab8ad39 697 value |= R_CPACR_ASEDIS_MASK;
f0aff255
FA
698 }
699
700 /* VFPv3 and upwards with NEON implement 32 double precision
701 * registers (D0-D31).
702 */
a6627f5f 703 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
f0aff255 704 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
fab8ad39 705 value |= R_CPACR_D32DIS_MASK;
f0aff255
FA
706 }
707 }
708 value &= mask;
2771db27 709 }
fc1120a7
PM
710
711 /*
712 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
713 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
714 */
715 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
716 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
fab8ad39
RH
717 mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
718 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
fc1120a7
PM
719 }
720
7ebd5f2e 721 env->cp15.cpacr_el1 = value;
2771db27
PM
722}
723
fc1120a7
PM
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In
 * user-mode emulation there is no virtual clock, so fall back to the
 * host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

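/*
 * Worked example (illustrative): for event 0x011 (CPU_CYCLES), number & 0x20
 * is 0, so bit 17 (0x11 & 0x1f) of PMCEID0 is set; for event 0x023
 * (STALL_FRONTEND), number & 0x20 is nonzero, so bit 3 (0x23 & 0x1f) of
 * PMCEID1 is set.
 */
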
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME)

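/*
 * Illustrative sketch (assumption about the MDCR write handlers, which are
 * outside this excerpt): a write that can toggle these bits would be
 * bracketed as
 *
 *     pmu_op_start(env);            // fold counts into guest-visible state
 *     env->cp15.mdcr_el2 = value;   // may enable/disable counters
 *     pmu_op_finish(env);           // re-baseline deltas, rearm pmu_timer
 *
 * so that counters never accumulate ticks from a period in which they were
 * disabled.
 */
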
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

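/*
 * Worked example (illustrative): at non-secure EL0 with PMXEVTYPER.U = 1 and
 * NSU = 0, "filtered = u != nsu" is true, so the counter does not count even
 * though it is enabled; setting both U and NSU (so u == nsu) re-enables
 * counting at non-secure EL0.
 */
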
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
            1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

1238
5ecdd3e4
AL
1239static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1240{
1241
1242 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1243 uint64_t count = 0;
1244 if (event_supported(event)) {
1245 uint16_t event_idx = supported_event_map[event];
1246 count = pm_events[event_idx].get_count(env);
1247 }
1248
1249 if (pmu_counter_enabled(env, counter)) {
f4efb4b2
AL
1250 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1251
1252 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1253 env->cp15.c9_pmovsr |= (1 << counter);
1254 pmu_update_irq(env);
1255 }
1256 env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
5ecdd3e4
AL
1257 }
1258 env->cp15.c14_pmevcntr_delta[counter] = count;
1259}
1260
1261static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1262{
1263 if (pmu_counter_enabled(env, counter)) {
4e7beb0c
AL
1264#ifndef CONFIG_USER_ONLY
1265 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1266 uint16_t event_idx = supported_event_map[event];
1267 uint64_t delta = UINT32_MAX -
1268 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1269 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1270
1271 if (overflow_in > 0) {
1272 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1273 overflow_in;
2fc0cc0e 1274 ARMCPU *cpu = env_archcpu(env);
4e7beb0c
AL
1275 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1276 }
1277#endif
1278
5ecdd3e4
AL
1279 env->cp15.c14_pmevcntr_delta[counter] -=
1280 env->cp15.c14_pmevcntr[counter];
1281 }
1282}
1283
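/*
 * Illustrative note: the 32-bit event counters overflow after
 * UINT32_MAX - current + 1 more events, so if a counter reads 0xfffffff0,
 * delta is 0x10 and ns_per_count(0x10) tells us when to fire cpu->pmu_timer
 * so the overflow bit can be raised on time.
 */
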
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

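/*
 * Illustrative note: the overflow test "old & ~new & INT32_MIN" is a sign-bit
 * trick. For example, incrementing 0xffffffff wraps new_pmswinc to 0; bit 31
 * goes from 1 to 0, the expression is nonzero, so PMOVSR[i] is set and the
 * PMU interrupt line is re-evaluated.
 */
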
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
    pmu_op_finish(env);
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmu_op_start(env);
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
    pmu_op_finish(env);
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

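/*
 * Worked example (illustrative): the PMEVTYPER<n>/PMEVCNTR<n> encodings pack
 * the counter index as n = (crm[1:0] << 3) | opc2[2:0]; e.g. PMEVTYPER5_EL0
 * has crm = 0b1100 and opc2 = 5, giving counter = (0 << 3) | 5 = 5.
 */
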
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

c4241c7d 1642static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
200ac0ef
PM
1643 uint64_t value)
1644{
6ecd0b6b
AB
1645 if (arm_feature(env, ARM_FEATURE_V8)) {
1646 env->cp15.c9_pmuserenr = value & 0xf;
1647 } else {
1648 env->cp15.c9_pmuserenr = value & 1;
1649 }
200ac0ef
PM
1650}
1651
c4241c7d
PM
1652static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1653 uint64_t value)
200ac0ef
PM
1654{
 1655 /* Only the bits for counters which are actually implemented can change */
7ece99b1 1656 value &= pmu_counter_mask(env);
200ac0ef 1657 env->cp15.c9_pminten |= value;
f4efb4b2 1658 pmu_update_irq(env);
200ac0ef
PM
1659}
1660
c4241c7d
PM
1661static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1662 uint64_t value)
200ac0ef 1663{
7ece99b1 1664 value &= pmu_counter_mask(env);
200ac0ef 1665 env->cp15.c9_pminten &= ~value;
f4efb4b2 1666 pmu_update_irq(env);
200ac0ef
PM
1667}
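/*
 * Standalone sketch of the SET/CLR register-pair idiom used by
 * PMINTENSET/PMINTENCLR above (and by PMCNTENSET/CLR, PMOVSSET/CLR):
 * both encodings alias one underlying field, writing 1s to the SET view
 * ORs bits in, writing 1s to the CLR view knocks them out, and bits for
 * unimplemented counters are masked first.  The mask value is invented
 * for the sketch, standing in for pmu_counter_mask().
 */
#include <stdint.h>

#define TOY_COUNTER_MASK 0x8000000full /* C bit plus four event counters */

static uint64_t toy_pminten;

static void toy_intenset_write(uint64_t value)
{
    toy_pminten |= value & TOY_COUNTER_MASK;
}

static void toy_intenclr_write(uint64_t value)
{
    toy_pminten &= ~(value & TOY_COUNTER_MASK);
}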
1668
c4241c7d
PM
1669static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1670 uint64_t value)
8641136c 1671{
a505d7fe
PM
1672 /* Note that even though the AArch64 view of this register has bits
1673 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1674 * architectural requirements for bits which are RES0 only in some
1675 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1676 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1677 */
855ea66d 1678 raw_write(env, ri, value & ~0x1FULL);
8641136c
NR
1679}
1680
64e0e2de
EI
1681static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1682{
ea22747c
RH
1683 /* Begin with base v8.0 state. */
1684 uint32_t valid_mask = 0x3fff;
2fc0cc0e 1685 ARMCPU *cpu = env_archcpu(env);
ea22747c 1686
bfe43e3d
RH
1687 /*
1688 * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1689 * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1690 * Instead, choose the format based on the mode of EL3.
1691 */
1692 if (arm_el_is_aa64(env, 3)) {
1693 value |= SCR_FW | SCR_AW; /* RES1 */
1694 valid_mask &= ~SCR_NET; /* RES0 */
252e8c69 1695
6bcbb07a
RH
1696 if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1697 !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1698 value |= SCR_RW; /* RAO/WI */
1699 }
da3d8b13
RH
1700 if (cpu_isar_feature(aa64_ras, cpu)) {
1701 valid_mask |= SCR_TERR;
1702 }
252e8c69
RH
1703 if (cpu_isar_feature(aa64_lor, cpu)) {
1704 valid_mask |= SCR_TLOR;
1705 }
1706 if (cpu_isar_feature(aa64_pauth, cpu)) {
1707 valid_mask |= SCR_API | SCR_APK;
1708 }
926c1b97
RDC
1709 if (cpu_isar_feature(aa64_sel2, cpu)) {
1710 valid_mask |= SCR_EEL2;
1711 }
8ddb300b
RH
1712 if (cpu_isar_feature(aa64_mte, cpu)) {
1713 valid_mask |= SCR_ATA;
1714 }
7cb1e618
RH
1715 if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1716 valid_mask |= SCR_ENSCXT;
1717 }
7ac61020
PM
1718 if (cpu_isar_feature(aa64_doublefault, cpu)) {
1719 valid_mask |= SCR_EASE | SCR_NMEA;
1720 }
ea22747c
RH
1721 } else {
1722 valid_mask &= ~(SCR_RW | SCR_ST);
da3d8b13
RH
1723 if (cpu_isar_feature(aa32_ras, cpu)) {
1724 valid_mask |= SCR_TERR;
1725 }
ea22747c 1726 }
64e0e2de
EI
1727
1728 if (!arm_feature(env, ARM_FEATURE_EL2)) {
1729 valid_mask &= ~SCR_HCE;
1730
 1731 /* On ARMv7, SMD (or SCD as it is called in v7) is only
 1732 * supported if EL2 exists. The bit is UNK/SBZP when
 1733 * EL2 is unavailable, so in QEMU's ARMv7 emulation we
 1734 * force it to zero in that case.
4eb27640 1735 * On ARMv8, this bit is always available.
64e0e2de 1736 */
4eb27640
GB
1737 if (arm_feature(env, ARM_FEATURE_V7) &&
1738 !arm_feature(env, ARM_FEATURE_V8)) {
64e0e2de
EI
1739 valid_mask &= ~SCR_SMD;
1740 }
1741 }
1742
1743 /* Clear all-context RES0 bits. */
1744 value &= valid_mask;
1745 raw_write(env, ri, value);
1746}
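/*
 * Standalone sketch of the masking pattern scr_write() follows: start
 * from the base architecture's writable bits, widen the mask for each
 * implemented feature, force RES1 bits on, then AND with the mask so
 * all-context RES0 bits never stick.  Bit positions follow SCR_EL3
 * (FW=4, AW=5, TLOR=14), but the TOY_* names are local to the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

#define TOY_SCR_BASE_MASK 0x3fffu
#define TOY_SCR_FW        (1u << 4)
#define TOY_SCR_AW        (1u << 5)
#define TOY_SCR_TLOR      (1u << 14)

static uint32_t toy_scr_write(uint32_t value, bool el3_is_aa64, bool have_lor)
{
    uint32_t valid_mask = TOY_SCR_BASE_MASK;

    if (el3_is_aa64) {
        value |= TOY_SCR_FW | TOY_SCR_AW; /* RES1 in the AArch64 view */
    }
    if (have_lor) {
        valid_mask |= TOY_SCR_TLOR;
    }
    return value & valid_mask; /* clear all-context RES0 bits */
}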
1747
10d0ef3e
MN
1748static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1749{
1750 /*
1751 * scr_write will set the RES1 bits on an AArch64-only CPU.
1752 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1753 */
1754 scr_write(env, ri, 0);
1755}
1756
630fcd4d
MZ
1757static CPAccessResult access_aa64_tid2(CPUARMState *env,
1758 const ARMCPRegInfo *ri,
1759 bool isread)
1760{
1761 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
1762 return CP_ACCESS_TRAP_EL2;
1763 }
1764
1765 return CP_ACCESS_OK;
1766}
1767
c4241c7d 1768static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
776d4e5c 1769{
2fc0cc0e 1770 ARMCPU *cpu = env_archcpu(env);
b85a1fd6
FA
1771
 1772 /* Acquire the CSSELR index from the CSSELR bank that matches
 1773 * the security state of this CCSIDR access
 1774 */
1775 uint32_t index = A32_BANKED_REG_GET(env, csselr,
1776 ri->secure & ARM_CP_SECSTATE_S);
1777
1778 return cpu->ccsidr[index];
776d4e5c
PM
1779}
1780
c4241c7d
PM
1781static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1782 uint64_t value)
776d4e5c 1783{
8d5c773e 1784 raw_write(env, ri, value & 0xf);
776d4e5c
PM
1785}
1786
1090b9c6
PM
1787static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1788{
29a0af61 1789 CPUState *cs = env_cpu(env);
cc974d5c
RDC
1790 bool el1 = arm_current_el(env) == 1;
1791 uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
1090b9c6
PM
1792 uint64_t ret = 0;
1793
cc974d5c 1794 if (hcr_el2 & HCR_IMO) {
636540e9
PM
1795 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1796 ret |= CPSR_I;
1797 }
1798 } else {
1799 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1800 ret |= CPSR_I;
1801 }
1090b9c6 1802 }
636540e9 1803
cc974d5c 1804 if (hcr_el2 & HCR_FMO) {
636540e9
PM
1805 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1806 ret |= CPSR_F;
1807 }
1808 } else {
1809 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1810 ret |= CPSR_F;
1811 }
1090b9c6 1812 }
636540e9 1813
3c29632f
RH
1814 if (hcr_el2 & HCR_AMO) {
1815 if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
1816 ret |= CPSR_A;
1817 }
1818 }
1819
1090b9c6
PM
1820 return ret;
1821}
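/*
 * Standalone sketch of the routing decision isr_read() makes: each of
 * ISR.{I,F} reflects either the physical or the virtual interrupt line,
 * selected by the corresponding HCR_EL2 routing bit (IMO/FMO), and the
 * A bit reflects the virtual SError line when AMO routes it.  The bit
 * positions match CPSR_{I,F,A} but the TOY_* names are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

#define TOY_F (1u << 6)
#define TOY_I (1u << 7)
#define TOY_A (1u << 8)

struct toy_lines {
    bool irq, fiq;          /* physical interrupt lines */
    bool virq, vfiq, vserr; /* virtual lines owned by EL2 */
};

static uint32_t toy_isr_read(const struct toy_lines *l,
                             bool imo, bool fmo, bool amo)
{
    uint32_t ret = 0;

    if (imo ? l->virq : l->irq) {
        ret |= TOY_I;
    }
    if (fmo ? l->vfiq : l->fiq) {
        ret |= TOY_F;
    }
    if (amo && l->vserr) {
        ret |= TOY_A;
    }
    return ret;
}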
1822
93fbc983
MZ
1823static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
1824 bool isread)
1825{
1826 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
1827 return CP_ACCESS_TRAP_EL2;
1828 }
1829
1830 return CP_ACCESS_OK;
1831}
1832
1833static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
1834 bool isread)
1835{
1836 if (arm_feature(env, ARM_FEATURE_V8)) {
1837 return access_aa64_tid1(env, ri, isread);
1838 }
1839
1840 return CP_ACCESS_OK;
1841}
1842
e9aa6c21 1843static const ARMCPRegInfo v7_cp_reginfo[] = {
7d57f408
PM
1844 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1845 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1846 .access = PL1_W, .type = ARM_CP_NOP },
200ac0ef
PM
1847 /* Performance monitors are implementation defined in v7,
1848 * but with an ARM recommended set of registers, which we
ac689a2e 1849 * follow.
200ac0ef
PM
1850 *
1851 * Performance registers fall into three categories:
1852 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1853 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1854 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1855 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1856 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1857 */
1858 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
7a0e58fa 1859 .access = PL0_RW, .type = ARM_CP_ALIAS,
8521466b 1860 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
fcd25206
PM
1861 .writefn = pmcntenset_write,
1862 .accessfn = pmreg_access,
1863 .raw_writefn = raw_write },
8521466b
AF
1864 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1865 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1866 .access = PL0_RW, .accessfn = pmreg_access,
1867 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1868 .writefn = pmcntenset_write, .raw_writefn = raw_write },
200ac0ef 1869 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
8521466b
AF
1870 .access = PL0_RW,
1871 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
fcd25206
PM
1872 .accessfn = pmreg_access,
1873 .writefn = pmcntenclr_write,
7a0e58fa 1874 .type = ARM_CP_ALIAS },
8521466b
AF
1875 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1876 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1877 .access = PL0_RW, .accessfn = pmreg_access,
7a0e58fa 1878 .type = ARM_CP_ALIAS,
8521466b
AF
1879 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1880 .writefn = pmcntenclr_write },
200ac0ef 1881 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
f4efb4b2 1882 .access = PL0_RW, .type = ARM_CP_IO,
e4e91a21 1883 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
fcd25206
PM
1884 .accessfn = pmreg_access,
1885 .writefn = pmovsr_write,
1886 .raw_writefn = raw_write },
978364f1
AF
1887 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1888 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1889 .access = PL0_RW, .accessfn = pmreg_access,
f4efb4b2 1890 .type = ARM_CP_ALIAS | ARM_CP_IO,
978364f1
AF
1891 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1892 .writefn = pmovsr_write,
1893 .raw_writefn = raw_write },
200ac0ef 1894 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
f4efb4b2
AL
1895 .access = PL0_W, .accessfn = pmreg_access_swinc,
1896 .type = ARM_CP_NO_RAW | ARM_CP_IO,
0d4bfd7d
AL
1897 .writefn = pmswinc_write },
1898 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
1899 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
f4efb4b2
AL
1900 .access = PL0_W, .accessfn = pmreg_access_swinc,
1901 .type = ARM_CP_NO_RAW | ARM_CP_IO,
0d4bfd7d 1902 .writefn = pmswinc_write },
6b040780
WH
1903 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1904 .access = PL0_RW, .type = ARM_CP_ALIAS,
1905 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
6ecd0b6b 1906 .accessfn = pmreg_access_selr, .writefn = pmselr_write,
6b040780
WH
1907 .raw_writefn = raw_write},
1908 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1909 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
6ecd0b6b 1910 .access = PL0_RW, .accessfn = pmreg_access_selr,
6b040780
WH
1911 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
1912 .writefn = pmselr_write, .raw_writefn = raw_write, },
200ac0ef 1913 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
169c8938 1914 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
421c7ebd 1915 .readfn = pmccntr_read, .writefn = pmccntr_write32,
6ecd0b6b 1916 .accessfn = pmreg_access_ccntr },
8521466b
AF
1917 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1918 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
6ecd0b6b 1919 .access = PL0_RW, .accessfn = pmreg_access_ccntr,
8521466b 1920 .type = ARM_CP_IO,
980ebe87
AL
1921 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
1922 .readfn = pmccntr_read, .writefn = pmccntr_write,
1923 .raw_readfn = raw_read, .raw_writefn = raw_write, },
4b8afa1f
AL
1924 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
1925 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
1926 .access = PL0_RW, .accessfn = pmreg_access,
1927 .type = ARM_CP_ALIAS | ARM_CP_IO,
1928 .resetvalue = 0, },
8521466b
AF
1929 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1930 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
980ebe87 1931 .writefn = pmccfiltr_write, .raw_writefn = raw_write,
8521466b
AF
1932 .access = PL0_RW, .accessfn = pmreg_access,
1933 .type = ARM_CP_IO,
1934 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1935 .resetvalue = 0, },
200ac0ef 1936 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
5ecdd3e4
AL
1937 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1938 .accessfn = pmreg_access,
fdb86656
WH
1939 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1940 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1941 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
5ecdd3e4
AL
1942 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1943 .accessfn = pmreg_access,
fdb86656 1944 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
200ac0ef 1945 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
5ecdd3e4
AL
1946 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1947 .accessfn = pmreg_access_xevcntr,
1948 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
1949 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
1950 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
1951 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1952 .accessfn = pmreg_access_xevcntr,
1953 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
200ac0ef 1954 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
1fce1ba9 1955 .access = PL0_R | PL1_RW, .accessfn = access_tpm,
e4e91a21 1956 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
200ac0ef 1957 .resetvalue = 0,
d4e6df63 1958 .writefn = pmuserenr_write, .raw_writefn = raw_write },
8a83ffc2
AF
1959 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
1960 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
1fce1ba9 1961 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
8a83ffc2
AF
1962 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1963 .resetvalue = 0,
1964 .writefn = pmuserenr_write, .raw_writefn = raw_write },
200ac0ef 1965 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
1fce1ba9 1966 .access = PL1_RW, .accessfn = access_tpm,
b7d793ad 1967 .type = ARM_CP_ALIAS | ARM_CP_IO,
e6ec5457 1968 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
200ac0ef 1969 .resetvalue = 0,
d4e6df63 1970 .writefn = pmintenset_write, .raw_writefn = raw_write },
e6ec5457
WH
1971 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
1972 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
1973 .access = PL1_RW, .accessfn = access_tpm,
1974 .type = ARM_CP_IO,
1975 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1976 .writefn = pmintenset_write, .raw_writefn = raw_write,
1977 .resetvalue = 0x0 },
200ac0ef 1978 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
fc5f6856 1979 .access = PL1_RW, .accessfn = access_tpm,
887c0f15 1980 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
200ac0ef 1981 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
b061a82b 1982 .writefn = pmintenclr_write, },
978364f1
AF
1983 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
1984 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
fc5f6856 1985 .access = PL1_RW, .accessfn = access_tpm,
887c0f15 1986 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
978364f1
AF
1987 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1988 .writefn = pmintenclr_write },
7da845b0
PM
1989 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
1990 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
630fcd4d
MZ
1991 .access = PL1_R,
1992 .accessfn = access_aa64_tid2,
1993 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
7da845b0
PM
1994 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
1995 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
630fcd4d
MZ
1996 .access = PL1_RW,
1997 .accessfn = access_aa64_tid2,
1998 .writefn = csselr_write, .resetvalue = 0,
b85a1fd6
FA
1999 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2000 offsetof(CPUARMState, cp15.csselr_ns) } },
776d4e5c
PM
2001 /* Auxiliary ID register: this actually has an IMPDEF value but for now
 2002 * we just RAZ for all cores.
2003 */
0ff644a7
PM
2004 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2005 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
93fbc983
MZ
2006 .access = PL1_R, .type = ARM_CP_CONST,
2007 .accessfn = access_aa64_tid1,
2008 .resetvalue = 0 },
f32cdad5
PM
2009 /* Auxiliary fault status registers: these also are IMPDEF, and we
2010 * choose to RAZ/WI for all cores.
2011 */
2012 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2013 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
84929218
RH
2014 .access = PL1_RW, .accessfn = access_tvm_trvm,
2015 .type = ARM_CP_CONST, .resetvalue = 0 },
f32cdad5
PM
2016 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2017 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
84929218
RH
2018 .access = PL1_RW, .accessfn = access_tvm_trvm,
2019 .type = ARM_CP_CONST, .resetvalue = 0 },
b0fe2427
PM
2020 /* MAIR can just read-as-written because we don't implement caches
2021 * and so don't need to care about memory attributes.
2022 */
2023 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2024 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
84929218
RH
2025 .access = PL1_RW, .accessfn = access_tvm_trvm,
2026 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
b0fe2427 2027 .resetvalue = 0 },
4cfb8ad8
PM
2028 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2029 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2030 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2031 .resetvalue = 0 },
b0fe2427
PM
2032 /* For non-long-descriptor page tables these are PRRR and NMRR;
 2033 * regardless, they still act as reads-as-written for QEMU.
b0fe2427 2034 */
1281f8e3 2035 /* MAIR0/1 are defined separately from their 64-bit counterpart, which
be693c87
GB
2036 * allows them to assign the correct fieldoffset based on the endianness
2037 * handled in the field definitions.
2038 */
a903c449 2039 { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
84929218
RH
2040 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2041 .access = PL1_RW, .accessfn = access_tvm_trvm,
be693c87
GB
2042 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2043 offsetof(CPUARMState, cp15.mair0_ns) },
b0fe2427 2044 .resetfn = arm_cp_reset_ignore },
a903c449 2045 { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
84929218
RH
2046 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2047 .access = PL1_RW, .accessfn = access_tvm_trvm,
be693c87
GB
2048 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2049 offsetof(CPUARMState, cp15.mair1_ns) },
b0fe2427 2050 .resetfn = arm_cp_reset_ignore },
1090b9c6
PM
2051 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2052 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
7a0e58fa 2053 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
995939a6
PM
2054 /* 32 bit ITLB invalidates */
2055 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
30881b73
RH
2056 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2057 .writefn = tlbiall_write },
995939a6 2058 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
30881b73
RH
2059 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2060 .writefn = tlbimva_write },
995939a6 2061 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
30881b73
RH
2062 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2063 .writefn = tlbiasid_write },
995939a6
PM
2064 /* 32 bit DTLB invalidates */
2065 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
30881b73
RH
2066 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2067 .writefn = tlbiall_write },
995939a6 2068 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
30881b73
RH
2069 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2070 .writefn = tlbimva_write },
995939a6 2071 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
30881b73
RH
2072 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2073 .writefn = tlbiasid_write },
995939a6
PM
2074 /* 32 bit TLB invalidates */
2075 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
30881b73
RH
2076 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2077 .writefn = tlbiall_write },
995939a6 2078 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
30881b73
RH
2079 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2080 .writefn = tlbimva_write },
995939a6 2081 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
30881b73
RH
2082 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2083 .writefn = tlbiasid_write },
995939a6 2084 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
30881b73
RH
2085 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2086 .writefn = tlbimvaa_write },
995939a6
PM
2087};
2088
2089static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2090 /* 32 bit TLB invalidates, Inner Shareable */
2091 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
30881b73
RH
2092 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2093 .writefn = tlbiall_is_write },
995939a6 2094 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
30881b73
RH
2095 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2096 .writefn = tlbimva_is_write },
995939a6 2097 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
30881b73 2098 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
fa439fc5 2099 .writefn = tlbiasid_is_write },
995939a6 2100 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
30881b73 2101 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
fa439fc5 2102 .writefn = tlbimvaa_is_write },
e9aa6c21
PM
2103};
2104
327dd510
AL
2105static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2106 /* PMOVSSET is not implemented in v7 before v7ve */
2107 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2108 .access = PL0_RW, .accessfn = pmreg_access,
f4efb4b2 2109 .type = ARM_CP_ALIAS | ARM_CP_IO,
327dd510
AL
2110 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2111 .writefn = pmovsset_write,
2112 .raw_writefn = raw_write },
2113 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2114 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2115 .access = PL0_RW, .accessfn = pmreg_access,
f4efb4b2 2116 .type = ARM_CP_ALIAS | ARM_CP_IO,
327dd510
AL
2117 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2118 .writefn = pmovsset_write,
2119 .raw_writefn = raw_write },
327dd510
AL
2120};
2121
c4241c7d
PM
2122static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2123 uint64_t value)
c326b979
PM
2124{
2125 value &= 1;
2126 env->teecr = value;
c326b979
PM
2127}
2128
cc7613bf
PM
2129static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2130 bool isread)
2131{
2132 /*
2133 * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2134 * at all, so we don't need to check whether we're v8A.
2135 */
2136 if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2137 (env->cp15.hstr_el2 & HSTR_TTEE)) {
2138 return CP_ACCESS_TRAP_EL2;
2139 }
2140 return CP_ACCESS_OK;
2141}
2142
3f208fd7
PM
2143static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2144 bool isread)
c326b979 2145{
dcbff19b 2146 if (arm_current_el(env) == 0 && (env->teecr & 1)) {
92611c00 2147 return CP_ACCESS_TRAP;
c326b979 2148 }
cc7613bf 2149 return teecr_access(env, ri, isread);
c326b979
PM
2150}
2151
2152static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2153 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2154 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2155 .resetvalue = 0,
cc7613bf 2156 .writefn = teecr_write, .accessfn = teecr_access },
c326b979
PM
2157 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2158 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
92611c00 2159 .accessfn = teehbr_access, .resetvalue = 0 },
c326b979
PM
2160};
2161
4d31c596 2162static const ARMCPRegInfo v6k_cp_reginfo[] = {
e4fe830b
PM
2163 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2164 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2165 .access = PL0_RW,
54bf36ed 2166 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
4d31c596
PM
2167 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2168 .access = PL0_RW,
54bf36ed
FA
2169 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2170 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
e4fe830b
PM
2171 .resetfn = arm_cp_reset_ignore },
2172 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2173 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2174 .access = PL0_R|PL1_W,
54bf36ed
FA
2175 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2176 .resetvalue = 0},
4d31c596
PM
2177 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2178 .access = PL0_R|PL1_W,
54bf36ed
FA
2179 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2180 offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
e4fe830b 2181 .resetfn = arm_cp_reset_ignore },
54bf36ed 2182 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
e4fe830b 2183 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
4d31c596 2184 .access = PL1_RW,
54bf36ed
FA
2185 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2186 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2187 .access = PL1_RW,
2188 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2189 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2190 .resetvalue = 0 },
4d31c596
PM
2191};
2192
55d284af
PM
2193#ifndef CONFIG_USER_ONLY
2194
3f208fd7
PM
2195static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2196 bool isread)
00108f2d 2197{
75502672
PM
2198 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2199 * Writable only at the highest implemented exception level.
2200 */
2201 int el = arm_current_el(env);
5bc84371
RH
2202 uint64_t hcr;
2203 uint32_t cntkctl;
75502672
PM
2204
2205 switch (el) {
2206 case 0:
5bc84371
RH
2207 hcr = arm_hcr_el2_eff(env);
2208 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2209 cntkctl = env->cp15.cnthctl_el2;
2210 } else {
2211 cntkctl = env->cp15.c14_cntkctl;
2212 }
2213 if (!extract32(cntkctl, 0, 2)) {
75502672
PM
2214 return CP_ACCESS_TRAP;
2215 }
2216 break;
2217 case 1:
2218 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2219 arm_is_secure_below_el3(env)) {
2220 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2221 return CP_ACCESS_TRAP_UNCATEGORIZED;
2222 }
2223 break;
2224 case 2:
2225 case 3:
2226 break;
00108f2d 2227 }
75502672
PM
2228
2229 if (!isread && el < arm_highest_el(env)) {
2230 return CP_ACCESS_TRAP_UNCATEGORIZED;
2231 }
2232
00108f2d
PM
2233 return CP_ACCESS_OK;
2234}
2235
3f208fd7
PM
2236static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2237 bool isread)
00108f2d 2238{
0b6440af 2239 unsigned int cur_el = arm_current_el(env);
e6ef0169 2240 bool has_el2 = arm_is_el2_enabled(env);
5bc84371 2241 uint64_t hcr = arm_hcr_el2_eff(env);
0b6440af 2242
5bc84371
RH
2243 switch (cur_el) {
2244 case 0:
2245 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2246 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2247 return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2248 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2249 }
0b6440af 2250
5bc84371
RH
2251 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2252 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2253 return CP_ACCESS_TRAP;
2254 }
2255
2256 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2257 if (hcr & HCR_E2H) {
2258 if (timeridx == GTIMER_PHYS &&
2259 !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2260 return CP_ACCESS_TRAP_EL2;
2261 }
2262 } else {
2263 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
e6ef0169 2264 if (has_el2 && timeridx == GTIMER_PHYS &&
5bc84371
RH
2265 !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2266 return CP_ACCESS_TRAP_EL2;
2267 }
2268 }
2269 break;
2270
2271 case 1:
2272 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
e6ef0169 2273 if (has_el2 && timeridx == GTIMER_PHYS &&
5bc84371
RH
2274 (hcr & HCR_E2H
2275 ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2276 : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2277 return CP_ACCESS_TRAP_EL2;
2278 }
2279 break;
0b6440af 2280 }
00108f2d
PM
2281 return CP_ACCESS_OK;
2282}
2283
3f208fd7
PM
2284static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2285 bool isread)
00108f2d 2286{
0b6440af 2287 unsigned int cur_el = arm_current_el(env);
e6ef0169 2288 bool has_el2 = arm_is_el2_enabled(env);
5bc84371 2289 uint64_t hcr = arm_hcr_el2_eff(env);
0b6440af 2290
5bc84371
RH
2291 switch (cur_el) {
2292 case 0:
2293 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2294 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2295 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2296 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2297 }
0b6440af 2298
5bc84371
RH
2299 /*
2300 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2301 * EL0 if EL0[PV]TEN is zero.
2302 */
2303 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2304 return CP_ACCESS_TRAP;
2305 }
2306 /* fall through */
2307
2308 case 1:
e6ef0169 2309 if (has_el2 && timeridx == GTIMER_PHYS) {
5bc84371
RH
2310 if (hcr & HCR_E2H) {
2311 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2312 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2313 return CP_ACCESS_TRAP_EL2;
2314 }
2315 } else {
2316 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2317 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2318 return CP_ACCESS_TRAP_EL2;
2319 }
2320 }
2321 }
2322 break;
0b6440af 2323 }
00108f2d
PM
2324 return CP_ACCESS_OK;
2325}
2326
2327static CPAccessResult gt_pct_access(CPUARMState *env,
3f208fd7
PM
2328 const ARMCPRegInfo *ri,
2329 bool isread)
00108f2d 2330{
3f208fd7 2331 return gt_counter_access(env, GTIMER_PHYS, isread);
00108f2d
PM
2332}
2333
2334static CPAccessResult gt_vct_access(CPUARMState *env,
3f208fd7
PM
2335 const ARMCPRegInfo *ri,
2336 bool isread)
00108f2d 2337{
3f208fd7 2338 return gt_counter_access(env, GTIMER_VIRT, isread);
00108f2d
PM
2339}
2340
3f208fd7
PM
2341static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2342 bool isread)
00108f2d 2343{
3f208fd7 2344 return gt_timer_access(env, GTIMER_PHYS, isread);
00108f2d
PM
2345}
2346
3f208fd7
PM
2347static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2348 bool isread)
00108f2d 2349{
3f208fd7 2350 return gt_timer_access(env, GTIMER_VIRT, isread);
00108f2d
PM
2351}
2352
b4d3978c 2353static CPAccessResult gt_stimer_access(CPUARMState *env,
3f208fd7
PM
2354 const ARMCPRegInfo *ri,
2355 bool isread)
b4d3978c
PM
2356{
2357 /* The AArch64 register view of the secure physical timer is
2358 * always accessible from EL3, and configurably accessible from
2359 * Secure EL1.
2360 */
2361 switch (arm_current_el(env)) {
2362 case 1:
2363 if (!arm_is_secure(env)) {
2364 return CP_ACCESS_TRAP;
2365 }
2366 if (!(env->cp15.scr_el3 & SCR_ST)) {
2367 return CP_ACCESS_TRAP_EL3;
2368 }
2369 return CP_ACCESS_OK;
2370 case 0:
2371 case 2:
2372 return CP_ACCESS_TRAP;
2373 case 3:
2374 return CP_ACCESS_OK;
2375 default:
2376 g_assert_not_reached();
2377 }
2378}
2379
55d284af
PM
2380static uint64_t gt_get_countervalue(CPUARMState *env)
2381{
7def8754
AJ
2382 ARMCPU *cpu = env_archcpu(env);
2383
2384 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
55d284af
PM
2385}
2386
2387static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2388{
2389 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2390
2391 if (gt->ctl & 1) {
2392 /* Timer enabled: calculate and set current ISTATUS, irq, and
2393 * reset timer to when ISTATUS next has to change
2394 */
edac4d8a
EI
2395 uint64_t offset = timeridx == GTIMER_VIRT ?
2396 cpu->env.cp15.cntvoff_el2 : 0;
55d284af
PM
2397 uint64_t count = gt_get_countervalue(&cpu->env);
2398 /* Note that this must be unsigned 64 bit arithmetic: */
edac4d8a 2399 int istatus = count - offset >= gt->cval;
55d284af 2400 uint64_t nexttick;
194cbc49 2401 int irqstate;
55d284af
PM
2402
2403 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
194cbc49
PM
2404
2405 irqstate = (istatus && !(gt->ctl & 2));
2406 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2407
55d284af
PM
2408 if (istatus) {
2409 /* Next transition is when count rolls back over to zero */
2410 nexttick = UINT64_MAX;
2411 } else {
2412 /* Next transition is when we hit cval */
edac4d8a 2413 nexttick = gt->cval + offset;
55d284af
PM
2414 }
2415 /* Note that the desired next expiry time might be beyond the
2416 * signed-64-bit range of a QEMUTimer -- in this case we just
2417 * set the timer for as far in the future as possible. When the
2418 * timer expires we will reset the timer for any remaining period.
2419 */
7def8754 2420 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
4a0245b6
AJ
2421 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2422 } else {
2423 timer_mod(cpu->gt_timer[timeridx], nexttick);
55d284af 2424 }
194cbc49 2425 trace_arm_gt_recalc(timeridx, irqstate, nexttick);
55d284af
PM
2426 } else {
2427 /* Timer disabled: ISTATUS and timer output always clear */
2428 gt->ctl &= ~4;
2429 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
bc72ad67 2430 timer_del(cpu->gt_timer[timeridx]);
194cbc49 2431 trace_arm_gt_recalc_disabled(timeridx);
55d284af
PM
2432 }
2433}
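/*
 * Standalone sketch of the arithmetic at the heart of gt_recalc_timer():
 * ISTATUS is one unsigned 64-bit comparison of the offset-adjusted count
 * against CVAL.  While the condition has not fired, the next transition
 * is at tick CVAL + offset; once it has fired, it only changes when the
 * counter wraps, hence UINT64_MAX.  Names are local to the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_gt_state {
    bool istatus;
    uint64_t nexttick; /* absolute tick of the next ISTATUS change */
};

static struct toy_gt_state
toy_gt_recalc(uint64_t count, uint64_t offset, uint64_t cval)
{
    struct toy_gt_state s;

    /* unsigned wraparound arithmetic, as the comment above insists */
    s.istatus = (count - offset) >= cval;
    s.nexttick = s.istatus ? UINT64_MAX : cval + offset;
    return s;
}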
2434
0e3eca4c
EI
2435static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2436 int timeridx)
55d284af 2437{
2fc0cc0e 2438 ARMCPU *cpu = env_archcpu(env);
55d284af 2439
bc72ad67 2440 timer_del(cpu->gt_timer[timeridx]);
55d284af
PM
2441}
2442
c4241c7d 2443static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
55d284af 2444{
c4241c7d 2445 return gt_get_countervalue(env);
55d284af
PM
2446}
2447
53d1f856
RH
2448static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2449{
2450 uint64_t hcr;
2451
2452 switch (arm_current_el(env)) {
2453 case 2:
2454 hcr = arm_hcr_el2_eff(env);
2455 if (hcr & HCR_E2H) {
2456 return 0;
2457 }
2458 break;
2459 case 0:
2460 hcr = arm_hcr_el2_eff(env);
2461 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2462 return 0;
2463 }
2464 break;
2465 }
2466
2467 return env->cp15.cntvoff_el2;
2468}
2469
edac4d8a
EI
2470static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2471{
53d1f856 2472 return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
edac4d8a
EI
2473}
2474
c4241c7d 2475static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
0e3eca4c 2476 int timeridx,
c4241c7d 2477 uint64_t value)
55d284af 2478{
194cbc49 2479 trace_arm_gt_cval_write(timeridx, value);
55d284af 2480 env->cp15.c14_timer[timeridx].cval = value;
2fc0cc0e 2481 gt_recalc_timer(env_archcpu(env), timeridx);
55d284af 2482}
c4241c7d 2483
0e3eca4c
EI
2484static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2485 int timeridx)
55d284af 2486{
53d1f856
RH
2487 uint64_t offset = 0;
2488
2489 switch (timeridx) {
2490 case GTIMER_VIRT:
8c94b071 2491 case GTIMER_HYPVIRT:
53d1f856
RH
2492 offset = gt_virt_cnt_offset(env);
2493 break;
2494 }
55d284af 2495
c4241c7d 2496 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
edac4d8a 2497 (gt_get_countervalue(env) - offset));
55d284af
PM
2498}
2499
c4241c7d 2500static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
0e3eca4c 2501 int timeridx,
c4241c7d 2502 uint64_t value)
55d284af 2503{
53d1f856
RH
2504 uint64_t offset = 0;
2505
2506 switch (timeridx) {
2507 case GTIMER_VIRT:
8c94b071 2508 case GTIMER_HYPVIRT:
53d1f856
RH
2509 offset = gt_virt_cnt_offset(env);
2510 break;
2511 }
55d284af 2512
194cbc49 2513 trace_arm_gt_tval_write(timeridx, value);
edac4d8a 2514 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
18084b2f 2515 sextract64(value, 0, 32);
2fc0cc0e 2516 gt_recalc_timer(env_archcpu(env), timeridx);
55d284af
PM
2517}
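/*
 * Standalone sketch of the TVAL <-> CVAL conversions above: TVAL is a
 * signed 32-bit downcounter view, so reading truncates CVAL minus the
 * offset-adjusted count to 32 bits, and writing sign-extends the new
 * TVAL and rebases CVAL so the timer fires TVAL ticks from "now".
 */
#include <stdint.h>

static uint32_t toy_tval_read(uint64_t cval, uint64_t count, uint64_t offset)
{
    return (uint32_t)(cval - (count - offset));
}

static uint64_t toy_tval_write(uint32_t tval, uint64_t count, uint64_t offset)
{
    /* the (int32_t) cast plays the role of sextract64(value, 0, 32) */
    return count - offset + (int64_t)(int32_t)tval;
}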
2518
c4241c7d 2519static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
0e3eca4c 2520 int timeridx,
c4241c7d 2521 uint64_t value)
55d284af 2522{
2fc0cc0e 2523 ARMCPU *cpu = env_archcpu(env);
55d284af
PM
2524 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2525
194cbc49 2526 trace_arm_gt_ctl_write(timeridx, value);
d3afacc7 2527 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
55d284af
PM
2528 if ((oldval ^ value) & 1) {
2529 /* Enable toggled */
2530 gt_recalc_timer(cpu, timeridx);
d3afacc7 2531 } else if ((oldval ^ value) & 2) {
55d284af
PM
2532 /* IMASK toggled: don't need to recalculate,
2533 * just set the interrupt line based on ISTATUS
2534 */
194cbc49
PM
2535 int irqstate = (oldval & 4) && !(value & 2);
2536
2537 trace_arm_gt_imask_toggle(timeridx, irqstate);
2538 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
55d284af 2539 }
55d284af
PM
2540}
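/*
 * Standalone, simplified sketch of gt_ctl_write(): only ENABLE (bit 0)
 * and IMASK (bit 1) are writable -- that is what deposit64(old, 0, 2, v)
 * achieves -- ISTATUS (bit 2) is read-only, and the output line is
 * asserted when ISTATUS is set and IMASK is clear.  The real function
 * additionally re-runs the full recalculation when ENABLE toggles.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t toy_ctl_write(uint32_t oldval, uint32_t value, bool *irqline)
{
    uint32_t newval = (oldval & ~3u) | (value & 3u); /* deposit bits [1:0] */

    /* line = ISTATUS && !IMASK, with ISTATUS carried over from oldval */
    *irqline = (newval & 4) && !(newval & 2);
    return newval;
}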
2541
0e3eca4c
EI
2542static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2543{
2544 gt_timer_reset(env, ri, GTIMER_PHYS);
2545}
2546
2547static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2548 uint64_t value)
2549{
2550 gt_cval_write(env, ri, GTIMER_PHYS, value);
2551}
2552
2553static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2554{
2555 return gt_tval_read(env, ri, GTIMER_PHYS);
2556}
2557
2558static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2559 uint64_t value)
2560{
2561 gt_tval_write(env, ri, GTIMER_PHYS, value);
2562}
2563
2564static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2565 uint64_t value)
2566{
2567 gt_ctl_write(env, ri, GTIMER_PHYS, value);
2568}
2569
bb5972e4
RH
2570static int gt_phys_redir_timeridx(CPUARMState *env)
2571{
2572 switch (arm_mmu_idx(env)) {
2573 case ARMMMUIdx_E20_0:
2574 case ARMMMUIdx_E20_2:
452ef8cb 2575 case ARMMMUIdx_E20_2_PAN:
b6ad6062
RDC
2576 case ARMMMUIdx_SE20_0:
2577 case ARMMMUIdx_SE20_2:
2578 case ARMMMUIdx_SE20_2_PAN:
bb5972e4
RH
2579 return GTIMER_HYP;
2580 default:
2581 return GTIMER_PHYS;
2582 }
2583}
2584
2585static int gt_virt_redir_timeridx(CPUARMState *env)
2586{
2587 switch (arm_mmu_idx(env)) {
2588 case ARMMMUIdx_E20_0:
2589 case ARMMMUIdx_E20_2:
452ef8cb 2590 case ARMMMUIdx_E20_2_PAN:
b6ad6062
RDC
2591 case ARMMMUIdx_SE20_0:
2592 case ARMMMUIdx_SE20_2:
2593 case ARMMMUIdx_SE20_2_PAN:
bb5972e4
RH
2594 return GTIMER_HYPVIRT;
2595 default:
2596 return GTIMER_VIRT;
2597 }
2598}
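/*
 * Standalone sketch of the VHE redirection idea behind the two helpers
 * above: when the current translation regime is an E2H ("EL2&0") one,
 * the CNTP_*/CNTV_* encodings are serviced by the EL2 timers rather
 * than the EL1 ones.  The enum and predicate are local stand-ins for
 * the GTIMER_* indexes and the mmu_idx switch.
 */
#include <stdbool.h>

enum toy_timer { TOY_PHYS, TOY_VIRT, TOY_HYP, TOY_HYPVIRT };

static enum toy_timer toy_phys_redir(bool regime_is_e2h)
{
    return regime_is_e2h ? TOY_HYP : TOY_PHYS;
}

static enum toy_timer toy_virt_redir(bool regime_is_e2h)
{
    return regime_is_e2h ? TOY_HYPVIRT : TOY_VIRT;
}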
2599
2600static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2601 const ARMCPRegInfo *ri)
2602{
2603 int timeridx = gt_phys_redir_timeridx(env);
2604 return env->cp15.c14_timer[timeridx].cval;
2605}
2606
2607static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2608 uint64_t value)
2609{
2610 int timeridx = gt_phys_redir_timeridx(env);
2611 gt_cval_write(env, ri, timeridx, value);
2612}
2613
2614static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2615 const ARMCPRegInfo *ri)
2616{
2617 int timeridx = gt_phys_redir_timeridx(env);
2618 return gt_tval_read(env, ri, timeridx);
2619}
2620
2621static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2622 uint64_t value)
2623{
2624 int timeridx = gt_phys_redir_timeridx(env);
2625 gt_tval_write(env, ri, timeridx, value);
2626}
2627
2628static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2629 const ARMCPRegInfo *ri)
2630{
2631 int timeridx = gt_phys_redir_timeridx(env);
2632 return env->cp15.c14_timer[timeridx].ctl;
2633}
2634
2635static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2636 uint64_t value)
2637{
2638 int timeridx = gt_phys_redir_timeridx(env);
2639 gt_ctl_write(env, ri, timeridx, value);
2640}
2641
0e3eca4c
EI
2642static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2643{
2644 gt_timer_reset(env, ri, GTIMER_VIRT);
2645}
2646
2647static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2648 uint64_t value)
2649{
2650 gt_cval_write(env, ri, GTIMER_VIRT, value);
2651}
2652
2653static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2654{
2655 return gt_tval_read(env, ri, GTIMER_VIRT);
2656}
2657
2658static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2659 uint64_t value)
2660{
2661 gt_tval_write(env, ri, GTIMER_VIRT, value);
2662}
2663
2664static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2665 uint64_t value)
2666{
2667 gt_ctl_write(env, ri, GTIMER_VIRT, value);
2668}
2669
edac4d8a
EI
2670static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2671 uint64_t value)
2672{
2fc0cc0e 2673 ARMCPU *cpu = env_archcpu(env);
edac4d8a 2674
194cbc49 2675 trace_arm_gt_cntvoff_write(value);
edac4d8a
EI
2676 raw_write(env, ri, value);
2677 gt_recalc_timer(cpu, GTIMER_VIRT);
2678}
2679
bb5972e4
RH
2680static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2681 const ARMCPRegInfo *ri)
2682{
2683 int timeridx = gt_virt_redir_timeridx(env);
2684 return env->cp15.c14_timer[timeridx].cval;
2685}
2686
2687static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2688 uint64_t value)
2689{
2690 int timeridx = gt_virt_redir_timeridx(env);
2691 gt_cval_write(env, ri, timeridx, value);
2692}
2693
2694static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2695 const ARMCPRegInfo *ri)
2696{
2697 int timeridx = gt_virt_redir_timeridx(env);
2698 return gt_tval_read(env, ri, timeridx);
2699}
2700
2701static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2702 uint64_t value)
2703{
2704 int timeridx = gt_virt_redir_timeridx(env);
2705 gt_tval_write(env, ri, timeridx, value);
2706}
2707
2708static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2709 const ARMCPRegInfo *ri)
2710{
2711 int timeridx = gt_virt_redir_timeridx(env);
2712 return env->cp15.c14_timer[timeridx].ctl;
2713}
2714
2715static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2716 uint64_t value)
2717{
2718 int timeridx = gt_virt_redir_timeridx(env);
2719 gt_ctl_write(env, ri, timeridx, value);
2720}
2721
b0e66d95
EI
2722static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2723{
2724 gt_timer_reset(env, ri, GTIMER_HYP);
2725}
2726
2727static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2728 uint64_t value)
2729{
2730 gt_cval_write(env, ri, GTIMER_HYP, value);
2731}
2732
2733static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2734{
2735 return gt_tval_read(env, ri, GTIMER_HYP);
2736}
2737
2738static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2739 uint64_t value)
2740{
2741 gt_tval_write(env, ri, GTIMER_HYP, value);
2742}
2743
2744static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2745 uint64_t value)
2746{
2747 gt_ctl_write(env, ri, GTIMER_HYP, value);
2748}
2749
b4d3978c
PM
2750static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2751{
2752 gt_timer_reset(env, ri, GTIMER_SEC);
2753}
2754
2755static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2756 uint64_t value)
2757{
2758 gt_cval_write(env, ri, GTIMER_SEC, value);
2759}
2760
2761static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2762{
2763 return gt_tval_read(env, ri, GTIMER_SEC);
2764}
2765
2766static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2767 uint64_t value)
2768{
2769 gt_tval_write(env, ri, GTIMER_SEC, value);
2770}
2771
2772static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2773 uint64_t value)
2774{
2775 gt_ctl_write(env, ri, GTIMER_SEC, value);
2776}
2777
8c94b071
RH
2778static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2779{
2780 gt_timer_reset(env, ri, GTIMER_HYPVIRT);
2781}
2782
2783static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2784 uint64_t value)
2785{
2786 gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
2787}
2788
2789static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2790{
2791 return gt_tval_read(env, ri, GTIMER_HYPVIRT);
2792}
2793
2794static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2795 uint64_t value)
2796{
2797 gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
2798}
2799
2800static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2801 uint64_t value)
2802{
2803 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
2804}
2805
55d284af
PM
2806void arm_gt_ptimer_cb(void *opaque)
2807{
2808 ARMCPU *cpu = opaque;
2809
2810 gt_recalc_timer(cpu, GTIMER_PHYS);
2811}
2812
2813void arm_gt_vtimer_cb(void *opaque)
2814{
2815 ARMCPU *cpu = opaque;
2816
2817 gt_recalc_timer(cpu, GTIMER_VIRT);
2818}
2819
b0e66d95
EI
2820void arm_gt_htimer_cb(void *opaque)
2821{
2822 ARMCPU *cpu = opaque;
2823
2824 gt_recalc_timer(cpu, GTIMER_HYP);
2825}
2826
b4d3978c
PM
2827void arm_gt_stimer_cb(void *opaque)
2828{
2829 ARMCPU *cpu = opaque;
2830
2831 gt_recalc_timer(cpu, GTIMER_SEC);
2832}
2833
8c94b071
RH
2834void arm_gt_hvtimer_cb(void *opaque)
2835{
2836 ARMCPU *cpu = opaque;
2837
2838 gt_recalc_timer(cpu, GTIMER_HYPVIRT);
2839}
2840
96eec6b2
AJ
2841static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
2842{
2843 ARMCPU *cpu = env_archcpu(env);
2844
2845 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
2846}
2847
55d284af
PM
2848static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2849 /* Note that CNTFRQ is purely reads-as-written for the benefit
2850 * of software; writing it doesn't actually change the timer frequency.
2851 * Our reset value matches the fixed frequency we implement the timer at.
2852 */
2853 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
7a0e58fa 2854 .type = ARM_CP_ALIAS,
a7adc4b7
PM
2855 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2856 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
a7adc4b7
PM
2857 },
2858 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2859 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2860 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
55d284af 2861 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
96eec6b2 2862 .resetfn = arm_gt_cntfrq_reset,
55d284af
PM
2863 },
2864 /* overall control: mostly access permissions */
a7adc4b7
PM
2865 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2866 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
55d284af
PM
2867 .access = PL1_RW,
2868 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2869 .resetvalue = 0,
2870 },
2871 /* per-timer control */
2872 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
9ff9dd3c 2873 .secure = ARM_CP_SECSTATE_NS,
daf1dc5f 2874 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
a7adc4b7
PM
2875 .accessfn = gt_ptimer_access,
2876 .fieldoffset = offsetoflow32(CPUARMState,
2877 cp15.c14_timer[GTIMER_PHYS].ctl),
bb5972e4
RH
2878 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2879 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
a7adc4b7 2880 },
9c513e78 2881 { .name = "CNTP_CTL_S",
9ff9dd3c
PM
2882 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2883 .secure = ARM_CP_SECSTATE_S,
daf1dc5f 2884 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
9ff9dd3c
PM
2885 .accessfn = gt_ptimer_access,
2886 .fieldoffset = offsetoflow32(CPUARMState,
2887 cp15.c14_timer[GTIMER_SEC].ctl),
2888 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2889 },
a7adc4b7
PM
2890 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2891 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
daf1dc5f 2892 .type = ARM_CP_IO, .access = PL0_RW,
a7adc4b7 2893 .accessfn = gt_ptimer_access,
55d284af
PM
2894 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2895 .resetvalue = 0,
bb5972e4
RH
2896 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
2897 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
55d284af
PM
2898 },
2899 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
daf1dc5f 2900 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
a7adc4b7
PM
2901 .accessfn = gt_vtimer_access,
2902 .fieldoffset = offsetoflow32(CPUARMState,
2903 cp15.c14_timer[GTIMER_VIRT].ctl),
bb5972e4
RH
2904 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2905 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
a7adc4b7
PM
2906 },
2907 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2908 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
daf1dc5f 2909 .type = ARM_CP_IO, .access = PL0_RW,
a7adc4b7 2910 .accessfn = gt_vtimer_access,
55d284af
PM
2911 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2912 .resetvalue = 0,
bb5972e4
RH
2913 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
2914 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
55d284af
PM
2915 },
2916 /* TimerValue views: a 32 bit downcounting view of the underlying state */
2917 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
9ff9dd3c 2918 .secure = ARM_CP_SECSTATE_NS,
daf1dc5f 2919 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
00108f2d 2920 .accessfn = gt_ptimer_access,
bb5972e4 2921 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
55d284af 2922 },
9c513e78 2923 { .name = "CNTP_TVAL_S",
9ff9dd3c
PM
2924 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2925 .secure = ARM_CP_SECSTATE_S,
daf1dc5f 2926 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
9ff9dd3c
PM
2927 .accessfn = gt_ptimer_access,
2928 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2929 },
a7adc4b7
PM
2930 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2931 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
daf1dc5f 2932 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
0e3eca4c 2933 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
bb5972e4 2934 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
a7adc4b7 2935 },
55d284af 2936 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
daf1dc5f 2937 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
00108f2d 2938 .accessfn = gt_vtimer_access,
bb5972e4 2939 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
55d284af 2940 },
a7adc4b7
PM
2941 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2942 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
daf1dc5f 2943 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
0e3eca4c 2944 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
bb5972e4 2945 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
a7adc4b7 2946 },
55d284af
PM
2947 /* The counter itself */
2948 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
7a0e58fa 2949 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
00108f2d 2950 .accessfn = gt_pct_access,
a7adc4b7
PM
2951 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2952 },
2953 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2954 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
7a0e58fa 2955 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
d57b9ee8 2956 .accessfn = gt_pct_access, .readfn = gt_cnt_read,
55d284af
PM
2957 },
2958 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
7a0e58fa 2959 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
00108f2d 2960 .accessfn = gt_vct_access,
edac4d8a 2961 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
a7adc4b7
PM
2962 },
2963 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2964 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
7a0e58fa 2965 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
d57b9ee8 2966 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
55d284af
PM
2967 },
2968 /* Comparison value, indicating when the timer goes off */
2969 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
9ff9dd3c 2970 .secure = ARM_CP_SECSTATE_NS,
daf1dc5f 2971 .access = PL0_RW,
7a0e58fa 2972 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
55d284af 2973 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
b061a82b 2974 .accessfn = gt_ptimer_access,
bb5972e4
RH
2975 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
2976 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
a7adc4b7 2977 },
9c513e78 2978 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
9ff9dd3c 2979 .secure = ARM_CP_SECSTATE_S,
daf1dc5f 2980 .access = PL0_RW,
9ff9dd3c
PM
2981 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2982 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2983 .accessfn = gt_ptimer_access,
2984 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2985 },
a7adc4b7
PM
2986 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2987 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
daf1dc5f 2988 .access = PL0_RW,
a7adc4b7
PM
2989 .type = ARM_CP_IO,
2990 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
12cde08a 2991 .resetvalue = 0, .accessfn = gt_ptimer_access,
bb5972e4
RH
2992 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
2993 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
55d284af
PM
2994 },
2995 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
daf1dc5f 2996 .access = PL0_RW,
7a0e58fa 2997 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
55d284af 2998 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
b061a82b 2999 .accessfn = gt_vtimer_access,
bb5972e4
RH
3000 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3001 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
a7adc4b7
PM
3002 },
3003 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3004 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
daf1dc5f 3005 .access = PL0_RW,
a7adc4b7
PM
3006 .type = ARM_CP_IO,
3007 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3008 .resetvalue = 0, .accessfn = gt_vtimer_access,
bb5972e4
RH
3009 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3010 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
55d284af 3011 },
b4d3978c
PM
3012 /* Secure timer -- this is actually restricted to only EL3
3013 * and configurably Secure-EL1 via the accessfn.
3014 */
3015 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3016 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3017 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3018 .accessfn = gt_stimer_access,
3019 .readfn = gt_sec_tval_read,
3020 .writefn = gt_sec_tval_write,
3021 .resetfn = gt_sec_timer_reset,
3022 },
3023 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3024 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3025 .type = ARM_CP_IO, .access = PL1_RW,
3026 .accessfn = gt_stimer_access,
3027 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3028 .resetvalue = 0,
3029 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3030 },
3031 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3032 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3033 .type = ARM_CP_IO, .access = PL1_RW,
3034 .accessfn = gt_stimer_access,
3035 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3036 .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3037 },
55d284af
PM
3038};
3039
bb5972e4
RH
3040static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3041 bool isread)
3042{
3043 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3044 return CP_ACCESS_TRAP;
3045 }
3046 return CP_ACCESS_OK;
3047}
3048
55d284af 3049#else
26c4a83b
AB
3050
3051/* In user-mode most of the generic timer registers are inaccessible;
 3052 * however, modern kernels (4.12+) allow access to cntvct_el0
55d284af 3053 */
26c4a83b
AB
3054
3055static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3056{
7def8754
AJ
3057 ARMCPU *cpu = env_archcpu(env);
3058
26c4a83b
AB
 3059 /* Currently we have no support for QEMUTimer in linux-user, so we
 3060 * can't call gt_get_countervalue(env); instead we directly
 3061 * call the lower-level functions.
3062 */
7def8754 3063 return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
26c4a83b
AB
3064}
3065
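/*
 * Worked example, assuming the usual GTIMER_SCALE of 16: CNTFRQ_EL0
 * below resets to NANOSECONDS_PER_SECOND / GTIMER_SCALE = 62,500,000 Hz,
 * gt_cntfrq_period_ns() is then 16 ns, and gt_virt_cnt_read() returns
 * cpu_get_clock() / 16 -- e.g. 62,500 ticks after 1 ms of host clock.
 */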
6cc7a3ae 3066static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
26c4a83b
AB
3067 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3068 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3069 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3070 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3071 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3072 },
3073 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3074 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3075 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3076 .readfn = gt_virt_cnt_read,
3077 },
6cc7a3ae
PM
3078};
3079
55d284af
PM
3080#endif
3081
c4241c7d 3082static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
4a501606 3083{
891a2fe7 3084 if (arm_feature(env, ARM_FEATURE_LPAE)) {
8d5c773e 3085 raw_write(env, ri, value);
891a2fe7 3086 } else if (arm_feature(env, ARM_FEATURE_V7)) {
8d5c773e 3087 raw_write(env, ri, value & 0xfffff6ff);
4a501606 3088 } else {
8d5c773e 3089 raw_write(env, ri, value & 0xfffff1ff);
4a501606 3090 }
4a501606
PM
3091}
3092
3093#ifndef CONFIG_USER_ONLY
3094/* get_phys_addr() isn't present for user-mode-only targets */
702a9357 3095
3f208fd7
PM
3096static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3097 bool isread)
92611c00
PM
3098{
3099 if (ri->opc2 & 4) {
926c1b97 3100 /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
87562e4f
PM
3101 * Secure EL1 (which can only happen if EL3 is AArch64).
3102 * They are simply UNDEF if executed from NS EL1.
3103 * They function normally from EL2 or EL3.
92611c00 3104 */
87562e4f
PM
3105 if (arm_current_el(env) == 1) {
3106 if (arm_is_secure_below_el3(env)) {
926c1b97
RDC
3107 if (env->cp15.scr_el3 & SCR_EEL2) {
3108 return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
3109 }
87562e4f
PM
3110 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
3111 }
3112 return CP_ACCESS_TRAP_UNCATEGORIZED;
3113 }
92611c00
PM
3114 }
3115 return CP_ACCESS_OK;
3116}
3117
9fb005b0 3118#ifdef CONFIG_TCG
060e8a48 3119static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
03ae85f8 3120 MMUAccessType access_type, ARMMMUIdx mmu_idx)
4a501606 3121{
a8170e5e 3122 hwaddr phys_addr;
4a501606
PM
3123 target_ulong page_size;
3124 int prot;
b7cc4e82 3125 bool ret;
01c097f7 3126 uint64_t par64;
1313e2d7 3127 bool format64 = false;
8bf5b6a9 3128 MemTxAttrs attrs = {};
e14b5a23 3129 ARMMMUFaultInfo fi = {};
5b2d261d 3130 ARMCacheAttrs cacheattrs = {};
4a501606 3131
5b2d261d 3132 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
bc52bfeb 3133 &prot, &page_size, &fi, &cacheattrs);
1313e2d7 3134
9f225e60
PM
3135 /*
3136 * ATS operations only do S1 or S1+S2 translations, so we never
3137 * have to deal with the ARMCacheAttrs format for S2 only.
3138 */
3139 assert(!cacheattrs.is_s2_format);
3140
0710b2fa
PM
3141 if (ret) {
3142 /*
3143 * Some kinds of translation fault must cause exceptions rather
3144 * than being reported in the PAR.
3145 */
3146 int current_el = arm_current_el(env);
3147 int target_el;
3148 uint32_t syn, fsr, fsc;
3149 bool take_exc = false;
3150
b1a10c86 3151 if (fi.s1ptw && current_el == 1
fee7aa46 3152 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
0710b2fa
PM
3153 /*
3154 * Synchronous stage 2 fault on an access made as part of the
3155 * translation table walk for AT S1E0* or AT S1E1* insn
3156 * executed from NS EL1. If this is a synchronous external abort
3157 * and SCR_EL3.EA == 1, then we take a synchronous external abort
3158 * to EL3. Otherwise the fault is taken as an exception to EL2,
3159 * and HPFAR_EL2 holds the faulting IPA.
3160 */
3161 if (fi.type == ARMFault_SyncExternalOnWalk &&
3162 (env->cp15.scr_el3 & SCR_EA)) {
3163 target_el = 3;
3164 } else {
3165 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
9861248f
RDC
3166 if (arm_is_secure_below_el3(env) && fi.s1ns) {
3167 env->cp15.hpfar_el2 |= HPFAR_NS;
3168 }
0710b2fa
PM
3169 target_el = 2;
3170 }
3171 take_exc = true;
3172 } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3173 /*
3174 * Synchronous external aborts during a translation table walk
3175 * are taken as Data Abort exceptions.
3176 */
3177 if (fi.stage2) {
3178 if (current_el == 3) {
3179 target_el = 3;
3180 } else {
3181 target_el = 2;
3182 }
3183 } else {
3184 target_el = exception_target_el(env);
3185 }
3186 take_exc = true;
3187 }
3188
3189 if (take_exc) {
3190 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3191 if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3192 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3193 fsr = arm_fi_to_lfsc(&fi);
3194 fsc = extract32(fsr, 0, 6);
3195 } else {
3196 fsr = arm_fi_to_sfsc(&fi);
3197 fsc = 0x3f;
3198 }
3199 /*
3200 * Report exception with ESR indicating a fault due to a
3201 * translation table walk for a cache maintenance instruction.
3202 */
e24fd076 3203 syn = syn_data_abort_no_iss(current_el == target_el, 0,
0710b2fa
PM
3204 fi.ea, 1, fi.s1ptw, 1, fsc);
3205 env->exception.vaddress = value;
3206 env->exception.fsr = fsr;
3207 raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3208 }
3209 }
3210
1313e2d7
EI
3211 if (is_a64(env)) {
3212 format64 = true;
3213 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3214 /*
3215 * ATS1Cxx:
3216 * * TTBCR.EAE determines whether the result is returned using the
3217 * 32-bit or the 64-bit PAR format
3218 * * Instructions executed in Hyp mode always use the 64-bit format
3219 *
3220 * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
3221 * * The Non-secure TTBCR.EAE bit is set to 1
3222 * * The implementation includes EL2, and the value of HCR.VM is 1
3223 *
9d1bab33
PM
3224 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3225 *
23463e0e 3226 * ATS1Hx always uses the 64-bit format.
1313e2d7
EI
3227 */
3228 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3229
3230 if (arm_feature(env, ARM_FEATURE_EL2)) {
452ef8cb
RH
3231 if (mmu_idx == ARMMMUIdx_E10_0 ||
3232 mmu_idx == ARMMMUIdx_E10_1 ||
3233 mmu_idx == ARMMMUIdx_E10_1_PAN) {
9d1bab33 3234 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
1313e2d7
EI
3235 } else {
3236 format64 |= arm_current_el(env) == 2;
3237 }
3238 }
3239 }
3240
3241 if (format64) {
5efe9ed4 3242 /* Create a 64-bit PAR */
01c097f7 3243 par64 = (1 << 11); /* LPAE bit always set */
b7cc4e82 3244 if (!ret) {
702a9357 3245 par64 |= phys_addr & ~0xfffULL;
8bf5b6a9
PM
3246 if (!attrs.secure) {
3247 par64 |= (1 << 9); /* NS */
3248 }
5b2d261d
AB
3249 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3250 par64 |= cacheattrs.shareability << 7; /* SH */
4a501606 3251 } else {
5efe9ed4
PM
3252 uint32_t fsr = arm_fi_to_lfsc(&fi);
3253
702a9357 3254 par64 |= 1; /* F */
b7cc4e82 3255 par64 |= (fsr & 0x3f) << 1; /* FS */
0f7b791b
PM
3256 if (fi.stage2) {
3257 par64 |= (1 << 9); /* S */
3258 }
3259 if (fi.s1ptw) {
3260 par64 |= (1 << 8); /* PTW */
3261 }
4a501606
PM
3262 }
3263 } else {
b7cc4e82 3264 /* fsr is a DFSR/IFSR value for the short descriptor
702a9357
PM
3265 * translation table format (with WnR always clear).
3266 * Convert it to a 32-bit PAR.
3267 */
b7cc4e82 3268 if (!ret) {
702a9357
PM
3269 /* We do not set any attribute bits in the PAR */
3270 if (page_size == (1 << 24)
3271 && arm_feature(env, ARM_FEATURE_V7)) {
01c097f7 3272 par64 = (phys_addr & 0xff000000) | (1 << 1);
702a9357 3273 } else {
01c097f7 3274 par64 = phys_addr & 0xfffff000;
702a9357 3275 }
8bf5b6a9
PM
3276 if (!attrs.secure) {
3277 par64 |= (1 << 9); /* NS */
3278 }
702a9357 3279 } else {
5efe9ed4
PM
3280 uint32_t fsr = arm_fi_to_sfsc(&fi);
3281
b7cc4e82
PC
3282 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3283 ((fsr & 0xf) << 1) | 1;
702a9357 3284 }
4a501606 3285 }
060e8a48
PM
3286 return par64;
3287}
9fb005b0 3288#endif /* CONFIG_TCG */
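/*
 * Illustrative sketch of the 64-bit PAR layout assembled above; the
 * helper names are hypothetical and assume a 48-bit output address.
 */
static inline uint64_t example_par64_pa(uint64_t par64)
{
    assert(par64 & (1 << 11));              /* LPAE-format PAR */
    assert(!(par64 & 1));                   /* F clear: translation succeeded */
    return extract64(par64, 12, 36) << 12;  /* PA, 4K-aligned as written above */
}

static inline uint32_t example_par64_fsc(uint64_t par64)
{
    assert(par64 & 1);                      /* F set: translation faulted */
    return extract64(par64, 1, 6);          /* FS: long-descriptor fault status */
}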
060e8a48
PM
3289
3290static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3291{
9fb005b0 3292#ifdef CONFIG_TCG
03ae85f8 3293 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
060e8a48 3294 uint64_t par64;
d3649702
PM
3295 ARMMMUIdx mmu_idx;
3296 int el = arm_current_el(env);
3297 bool secure = arm_is_secure_below_el3(env);
060e8a48 3298
d3649702
PM
3299 switch (ri->opc2 & 6) {
3300 case 0:
04b07d29 3301 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
d3649702
PM
3302 switch (el) {
3303 case 3:
127b2b08 3304 mmu_idx = ARMMMUIdx_SE3;
d3649702
PM
3305 break;
3306 case 2:
b6ad6062 3307 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
04b07d29 3308 /* fall through */
d3649702 3309 case 1:
04b07d29 3310 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
b1a10c86 3311 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
04b07d29
RH
3312 : ARMMMUIdx_Stage1_E1_PAN);
3313 } else {
b1a10c86 3314 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
04b07d29 3315 }
d3649702
PM
3316 break;
3317 default:
3318 g_assert_not_reached();
3319 }
3320 break;
3321 case 2:
3322 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3323 switch (el) {
3324 case 3:
fba37aed 3325 mmu_idx = ARMMMUIdx_SE10_0;
d3649702
PM
3326 break;
3327 case 2:
b1a10c86 3328 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */
2859d7b5 3329 mmu_idx = ARMMMUIdx_Stage1_E0;
d3649702
PM
3330 break;
3331 case 1:
b1a10c86 3332 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
d3649702
PM
3333 break;
3334 default:
3335 g_assert_not_reached();
3336 }
3337 break;
3338 case 4:
3339 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
01b98b68 3340 mmu_idx = ARMMMUIdx_E10_1;
d3649702
PM
3341 break;
3342 case 6:
3343 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
01b98b68 3344 mmu_idx = ARMMMUIdx_E10_0;
d3649702
PM
3345 break;
3346 default:
3347 g_assert_not_reached();
3348 }
3349
3350 par64 = do_ats_write(env, value, access_type, mmu_idx);
01c097f7
FA
3351
3352 A32_BANKED_CURRENT_REG_SET(env, par, par64);
9fb005b0
PMD
3353#else
3354 /* Handled by hardware accelerator. */
3355 g_assert_not_reached();
3356#endif /* CONFIG_TCG */
4a501606 3357}
060e8a48 3358
14db7fe0
PM
3359static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3360 uint64_t value)
3361{
9fb005b0 3362#ifdef CONFIG_TCG
03ae85f8 3363 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
14db7fe0
PM
3364 uint64_t par64;
3365
e013b741 3366 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
14db7fe0
PM
3367
3368 A32_BANKED_CURRENT_REG_SET(env, par, par64);
9fb005b0
PMD
3369#else
3370 /* Handled by hardware accelerator. */
3371 g_assert_not_reached();
3372#endif /* CONFIG_TCG */
14db7fe0
PM
3373}
3374
3f208fd7
PM
3375static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3376 bool isread)
2a47df95 3377{
926c1b97
RDC
3378 if (arm_current_el(env) == 3 &&
3379 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
2a47df95
PM
3380 return CP_ACCESS_TRAP;
3381 }
3382 return CP_ACCESS_OK;
3383}
3384
060e8a48
PM
3385static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3386 uint64_t value)
3387{
9fb005b0 3388#ifdef CONFIG_TCG
03ae85f8 3389 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
d3649702
PM
3390 ARMMMUIdx mmu_idx;
3391 int secure = arm_is_secure_below_el3(env);
3392
3393 switch (ri->opc2 & 6) {
3394 case 0:
3395 switch (ri->opc1) {
04b07d29
RH
3396 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3397 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
b1a10c86 3398 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
04b07d29
RH
3399 : ARMMMUIdx_Stage1_E1_PAN);
3400 } else {
b1a10c86 3401 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
04b07d29 3402 }
d3649702
PM
3403 break;
3404 case 4: /* AT S1E2R, AT S1E2W */
b6ad6062 3405 mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
d3649702
PM
3406 break;
3407 case 6: /* AT S1E3R, AT S1E3W */
127b2b08 3408 mmu_idx = ARMMMUIdx_SE3;
d3649702
PM
3409 break;
3410 default:
3411 g_assert_not_reached();
3412 }
3413 break;
3414 case 2: /* AT S1E0R, AT S1E0W */
b1a10c86 3415 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
d3649702
PM
3416 break;
3417 case 4: /* AT S12E1R, AT S12E1W */
fba37aed 3418 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
d3649702
PM
3419 break;
3420 case 6: /* AT S12E0R, AT S12E0W */
fba37aed 3421 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
d3649702
PM
3422 break;
3423 default:
3424 g_assert_not_reached();
3425 }
060e8a48 3426
d3649702 3427 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
9fb005b0
PMD
3428#else
3429 /* Handled by hardware accelerator. */
3430 g_assert_not_reached();
3431#endif /* CONFIG_TCG */
060e8a48 3432}
4a501606
PM
3433#endif
3434
3435static const ARMCPRegInfo vapa_cp_reginfo[] = {
3436 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3437 .access = PL1_RW, .resetvalue = 0,
01c097f7
FA
3438 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3439 offsetoflow32(CPUARMState, cp15.par_ns) },
4a501606
PM
3440 .writefn = par_write },
3441#ifndef CONFIG_USER_ONLY
87562e4f 3442 /* This underdecoding is safe because the reginfo is NO_RAW. */
4a501606 3443 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
92611c00 3444 .access = PL1_W, .accessfn = ats_access,
0710b2fa 3445 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
4a501606 3446#endif
4a501606
PM
3447};
3448
18032bec
PM
3449/* Return basic MPU access permission bits. */
3450static uint32_t simple_mpu_ap_bits(uint32_t val)
3451{
3452 uint32_t ret;
3453 uint32_t mask;
3454 int i;
3455 ret = 0;
3456 mask = 3;
3457 for (i = 0; i < 16; i += 2) {
3458 ret |= (val >> i) & mask;
3459 mask <<= 2;
3460 }
3461 return ret;
3462}
3463
3464/* Pad basic MPU access permission bits to extended format. */
3465static uint32_t extended_mpu_ap_bits(uint32_t val)
3466{
3467 uint32_t ret;
3468 uint32_t mask;
3469 int i;
3470 ret = 0;
3471 mask = 3;
3472 for (i = 0; i < 16; i += 2) {
3473 ret |= (val & mask) << i;
3474 mask <<= 2;
3475 }
3476 return ret;
3477}
3478
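/*
 * Worked example for the two helpers above: the extended format keeps
 * 4 bits per region (upper two bits zero-padded), the simple format
 * 2 bits per region. For regions 0..2 with AP values 1, 2 and 3:
 *
 *   extended_mpu_ap_bits(0x39) == 0x321
 *   simple_mpu_ap_bits(0x321)  == 0x39
 */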
c4241c7d
PM
3479static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3480 uint64_t value)
18032bec 3481{
7e09797c 3482 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
18032bec
PM
3483}
3484
c4241c7d 3485static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18032bec 3486{
7e09797c 3487 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
18032bec
PM
3488}
3489
c4241c7d
PM
3490static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3491 uint64_t value)
18032bec 3492{
7e09797c 3493 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
18032bec
PM
3494}
3495
c4241c7d 3496static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18032bec 3497{
7e09797c 3498 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
18032bec
PM
3499}
3500
6cb0b013
PC
3501static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3502{
3503 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3504
3505 if (!u32p) {
3506 return 0;
3507 }
3508
1bc04a88 3509 u32p += env->pmsav7.rnr[M_REG_NS];
6cb0b013
PC
3510 return *u32p;
3511}
3512
3513static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3514 uint64_t value)
3515{
2fc0cc0e 3516 ARMCPU *cpu = env_archcpu(env);
6cb0b013
PC
3517 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3518
3519 if (!u32p) {
3520 return;
3521 }
3522
1bc04a88 3523 u32p += env->pmsav7.rnr[M_REG_NS];
d10eb08f 3524 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
6cb0b013
PC
3525 *u32p = value;
3526}
3527
6cb0b013
PC
3528static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3529 uint64_t value)
3530{
2fc0cc0e 3531 ARMCPU *cpu = env_archcpu(env);
6cb0b013
PC
3532 uint32_t nrgs = cpu->pmsav7_dregion;
3533
3534 if (value >= nrgs) {
3535 qemu_log_mask(LOG_GUEST_ERROR,
3536 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3537 " > %" PRIu32 "\n", (uint32_t)value, nrgs);
3538 return;
3539 }
3540
3541 raw_write(env, ri, value);
3542}
3543
3544static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
69ceea64
PM
3545 /* Reset for all these registers is handled in arm_cpu_reset(),
3546 * because the PMSAv7 is also used by M-profile CPUs, which do
3547 * not register cpregs but still need the state to be reset.
3548 */
6cb0b013
PC
3549 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3550 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3551 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
69ceea64
PM
3552 .readfn = pmsav7_read, .writefn = pmsav7_write,
3553 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3554 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3555 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3556 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
69ceea64
PM
3557 .readfn = pmsav7_read, .writefn = pmsav7_write,
3558 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3559 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3560 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3561 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
69ceea64
PM
3562 .readfn = pmsav7_read, .writefn = pmsav7_write,
3563 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3564 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3565 .access = PL1_RW,
1bc04a88 3566 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
69ceea64
PM
3567 .writefn = pmsav7_rgnr_write,
3568 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3569};
3570
18032bec
PM
3571static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3572 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
7a0e58fa 3573 .access = PL1_RW, .type = ARM_CP_ALIAS,
7e09797c 3574 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
18032bec
PM
3575 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3576 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
7a0e58fa 3577 .access = PL1_RW, .type = ARM_CP_ALIAS,
7e09797c 3578 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
18032bec
PM
3579 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3580 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3581 .access = PL1_RW,
7e09797c
PM
3582 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3583 .resetvalue = 0, },
18032bec
PM
3584 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3585 .access = PL1_RW,
7e09797c
PM
3586 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3587 .resetvalue = 0, },
ecce5c3c
PM
3588 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3589 .access = PL1_RW,
3590 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3591 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3592 .access = PL1_RW,
3593 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
06d76f31 3594 /* Protection region base and size registers */
e508a92b
PM
3595 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3596 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3597 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3598 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3599 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3600 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3601 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3602 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3603 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3604 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3605 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3606 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3607 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3608 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3609 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3610 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3611 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3612 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3613 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3614 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3615 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3616 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3617 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3618 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
18032bec
PM
3619};
3620
cb4a0a34
PM
3621static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3622 uint64_t value)
ecce5c3c 3623{
cb4a0a34 3624 ARMCPU *cpu = env_archcpu(env);
2ebcebe2 3625
e389be16
FA
3626 if (!arm_feature(env, ARM_FEATURE_V8)) {
3627 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
cb4a0a34
PM
3628 /*
3629 * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3630 * using the Long-descriptor translation table format.
3631 */
e389be16
FA
3632 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3633 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
cb4a0a34
PM
3634 /*
3635 * In an implementation that includes the Security Extensions,
e389be16
FA
3636 * TTBCR has additional fields PD0 [4] and PD1 [5] for
3637 * Short-descriptor translation table format.
3638 */
3639 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3640 } else {
3641 value &= TTBCR_N;
3642 }
e42c4db3 3643 }
e389be16 3644
d4e6df63
PM
3645 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3646 /* With LPAE, a write to TTBCR could result in a change of ASID
3647 * via the TTBCR.A1 bit, so do a TLB flush.
3648 */
d10eb08f 3649 tlb_flush(CPU(cpu));
d4e6df63 3650 }
cb4a0a34 3651 raw_write(env, ri, value);
ecce5c3c
PM
3652}
3653
d06dc933 3654static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
cb2e37df
PM
3655 uint64_t value)
3656{
2fc0cc0e 3657 ARMCPU *cpu = env_archcpu(env);
00c8cb0a 3658
cb2e37df 3659 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
d10eb08f 3660 tlb_flush(CPU(cpu));
cb4a0a34 3661 raw_write(env, ri, value);
cb2e37df
PM
3662}
3663
327ed10f
PM
3664static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3665 uint64_t value)
3666{
93f379b0
RH
3667 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
3668 if (cpreg_field_is_64bit(ri) &&
3669 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2fc0cc0e 3670 ARMCPU *cpu = env_archcpu(env);
d10eb08f 3671 tlb_flush(CPU(cpu));
327ed10f
PM
3672 }
3673 raw_write(env, ri, value);
3674}
3675
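/*
 * Sketch of the check above (the helper name is hypothetical): the ASID
 * lives in TTBRn bits [63:48], so e.g. a write changing the ASID from
 * 1 to 2 gives extract64(old ^ new, 48, 16) == 3 and forces a flush.
 */
static inline bool example_ttbr_asid_changed(uint64_t old_val, uint64_t new_val)
{
    return extract64(old_val ^ new_val, 48, 16) != 0;
}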
ed30da8e
RH
3676static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3677 uint64_t value)
3678{
d06dc933
RH
3679 /*
3680 * If we are running with E2&0 regime, then an ASID is active.
3681 * Flush if that might be changing. Note we're not checking
3682 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
3683 * holds the active ASID, only checking the field that might.
3684 */
3685 if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
3686 (arm_hcr_el2_eff(env) & HCR_E2H)) {
b6ad6062
RDC
3687 uint16_t mask = ARMMMUIdxBit_E20_2 |
3688 ARMMMUIdxBit_E20_2_PAN |
3689 ARMMMUIdxBit_E20_0;
3690
3691 if (arm_is_secure_below_el3(env)) {
3692 mask >>= ARM_MMU_IDX_A_NS;
3693 }
3694
3695 tlb_flush_by_mmuidx(env_cpu(env), mask);
d06dc933 3696 }
ed30da8e
RH
3697 raw_write(env, ri, value);
3698}
3699
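/*
 * Note on the "mask >>= ARM_MMU_IDX_A_NS" step above: it assumes the
 * mmu-idx numbering at this point in the tree, where each Secure index
 * bit is the matching Non-secure bit shifted down by ARM_MMU_IDX_A_NS,
 * e.g. ARMMMUIdxBit_SE20_2 == ARMMMUIdxBit_E20_2 >> ARM_MMU_IDX_A_NS.
 */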
b698e9cf
EI
3700static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3701 uint64_t value)
3702{
2fc0cc0e 3703 ARMCPU *cpu = env_archcpu(env);
b698e9cf
EI
3704 CPUState *cs = CPU(cpu);
3705
97fa9350
RH
3706 /*
3707 * A change in the VMID used by the stage 2 page table invalidates
3708 * the combined stage 1&2 TLBs (EL10_1 and EL10_0).
3709 */
b698e9cf 3710 if (raw_read(env, ri) != value) {
c4f060e8
RDC
3711 uint16_t mask = ARMMMUIdxBit_E10_1 |
3712 ARMMMUIdxBit_E10_1_PAN |
3713 ARMMMUIdxBit_E10_0;
3714
3715 if (arm_is_secure_below_el3(env)) {
3716 mask >>= ARM_MMU_IDX_A_NS;
3717 }
3718
3719 tlb_flush_by_mmuidx(cs, mask);
b698e9cf
EI
3720 raw_write(env, ri, value);
3721 }
3722}
3723
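/*
 * Unlike vmsa_ttbr_write() above, which compares only the ASID field,
 * vttbr_write() flushes on any change at all (VMID or base address),
 * since it compares the whole 64-bit value.
 */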
8e5d75c9 3724static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
18032bec 3725 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
84929218 3726 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4a7e2d73 3727 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
b061a82b 3728 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
18032bec 3729 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
84929218 3730 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
88ca1c2d
FA
3731 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3732 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
8e5d75c9 3733 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
84929218 3734 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
8e5d75c9
PC
3735 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3736 offsetof(CPUARMState, cp15.dfar_ns) } },
3737 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3738 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
84929218
RH
3739 .access = PL1_RW, .accessfn = access_tvm_trvm,
3740 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
8e5d75c9 3741 .resetvalue = 0, },
8e5d75c9
PC
3742};
3743
3744static const ARMCPRegInfo vmsa_cp_reginfo[] = {
6cd8a264
RH
3745 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3746 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
84929218 3747 .access = PL1_RW, .accessfn = access_tvm_trvm,
d81c519c 3748 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
327ed10f 3749 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
7dd8c9af 3750 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
84929218
RH
3751 .access = PL1_RW, .accessfn = access_tvm_trvm,
3752 .writefn = vmsa_ttbr_write, .resetvalue = 0,
7dd8c9af
FA
3753 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3754 offsetof(CPUARMState, cp15.ttbr0_ns) } },
327ed10f 3755 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
7dd8c9af 3756 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
84929218
RH
3757 .access = PL1_RW, .accessfn = access_tvm_trvm,
3758 .writefn = vmsa_ttbr_write, .resetvalue = 0,
7dd8c9af
FA
3759 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3760 offsetof(CPUARMState, cp15.ttbr1_ns) } },
cb2e37df
PM
3761 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3762 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
84929218
RH
3763 .access = PL1_RW, .accessfn = access_tvm_trvm,
3764 .writefn = vmsa_tcr_el12_write,
cb4a0a34
PM
3765 .raw_writefn = raw_write,
3766 .resetvalue = 0,
11f136ee 3767 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
cb2e37df 3768 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
84929218
RH
3769 .access = PL1_RW, .accessfn = access_tvm_trvm,
3770 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
cb4a0a34
PM
3771 .raw_writefn = raw_write,
3772 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
3773 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
18032bec
PM
3774};
3775
ab638a32
RH
3776/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3777 * QEMU TLBs or adjusting cached masks.
3778 */
3779static const ARMCPRegInfo ttbcr2_reginfo = {
3780 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
84929218
RH
3781 .access = PL1_RW, .accessfn = access_tvm_trvm,
3782 .type = ARM_CP_ALIAS,
d102058e 3783 .bank_fieldoffsets = {
cb4a0a34
PM
3784 offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
3785 offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
d102058e 3786 },
ab638a32
RH
3787};
3788
c4241c7d
PM
3789static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3790 uint64_t value)
1047b9d7
PM
3791{
3792 env->cp15.c15_ticonfig = value & 0xe7;
3793 /* The OS_TYPE bit in this register changes the reported CPUID! */
3794 env->cp15.c0_cpuid = (value & (1 << 5)) ?
3795 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1047b9d7
PM
3796}
3797
c4241c7d
PM
3798static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3799 uint64_t value)
1047b9d7
PM
3800{
3801 env->cp15.c15_threadid = value & 0xffff;
1047b9d7
PM
3802}
3803
c4241c7d
PM
3804static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3805 uint64_t value)
1047b9d7
PM
3806{
3807 /* Wait-for-interrupt (deprecated) */
2fc0cc0e 3808 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
1047b9d7
PM
3809}
3810
c4241c7d
PM
3811static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3812 uint64_t value)
c4804214
PM
3813{
3814 /* On OMAP there are registers indicating the max/min index of dcache lines
3815 * containing a dirty line; cache flush operations have to reset these.
3816 */
3817 env->cp15.c15_i_max = 0x000;
3818 env->cp15.c15_i_min = 0xff0;
c4804214
PM
3819}
3820
18032bec
PM
3821static const ARMCPRegInfo omap_cp_reginfo[] = {
3822 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3823 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
d81c519c 3824 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
6cd8a264 3825 .resetvalue = 0, },
1047b9d7
PM
3826 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3827 .access = PL1_RW, .type = ARM_CP_NOP },
3828 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3829 .access = PL1_RW,
3830 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3831 .writefn = omap_ticonfig_write },
3832 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3833 .access = PL1_RW,
3834 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3835 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3836 .access = PL1_RW, .resetvalue = 0xff0,
3837 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3838 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3839 .access = PL1_RW,
3840 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3841 .writefn = omap_threadid_write },
3842 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3843 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
7a0e58fa 3844 .type = ARM_CP_NO_RAW,
1047b9d7
PM
3845 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3846 /* TODO: Peripheral port remap register:
3847 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3848 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
3849 * when MMU is off.
3850 */
c4804214 3851 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
d4e6df63 3852 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
7a0e58fa 3853 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
c4804214 3854 .writefn = omap_cachemaint_write },
34f90529
PM
3855 { .name = "C9", .cp = 15, .crn = 9,
3856 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3857 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
1047b9d7
PM
3858};
3859
c4241c7d
PM
3860static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3861 uint64_t value)
1047b9d7 3862{
c0f4af17 3863 env->cp15.c15_cpar = value & 0x3fff;
1047b9d7
PM
3864}
3865
3866static const ARMCPRegInfo xscale_cp_reginfo[] = {
3867 { .name = "XSCALE_CPAR",
3868 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3869 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3870 .writefn = xscale_cpar_write, },
2771db27
PM
3871 { .name = "XSCALE_AUXCR",
3872 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3873 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3874 .resetvalue = 0, },
3b771579
PM
3875 /* XScale-specific cache lockdown: since we have no cache, we NOP these
3876 * and hope the guest does not really rely on cache behaviour.
3877 */
3878 { .name = "XSCALE_LOCK_ICACHE_LINE",
3879 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3880 .access = PL1_W, .type = ARM_CP_NOP },
3881 { .name = "XSCALE_UNLOCK_ICACHE",
3882 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3883 .access = PL1_W, .type = ARM_CP_NOP },
3884 { .name = "XSCALE_DCACHE_LOCK",
3885 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3886 .access = PL1_RW, .type = ARM_CP_NOP },
3887 { .name = "XSCALE_UNLOCK_DCACHE",
3888 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3889 .access = PL1_W, .type = ARM_CP_NOP },
1047b9d7
PM
3890};
3891
3892static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3893 /* RAZ/WI the whole crn=15 space when we don't have a more specific
3894 * implementation of this implementation-defined space.
3895 * Ideally this should eventually disappear in favour of actually
3896 * implementing the correct behaviour for all cores.
3897 */
3898 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3899 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3671cd87 3900 .access = PL1_RW,
7a0e58fa 3901 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
d4e6df63 3902 .resetvalue = 0 },
18032bec
PM
3903};
3904
c4804214
PM
3905static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3906 /* Cache status: RAZ because we have no cache so it's always clean */
3907 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
7a0e58fa 3908 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3909 .resetvalue = 0 },
c4804214
PM
3910};
3911
3912static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
a07d9df0 3913 /* We never have a block transfer operation in progress */
c4804214 3914 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
7a0e58fa 3915 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3916 .resetvalue = 0 },
30b05bba
PM
3917 /* The cache ops themselves: these all NOP for QEMU */
3918 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3919 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3920 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3921 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3922 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3923 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3924 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3925 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3926 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3927 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3928 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3929 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
c4804214
PM
3930};
3931
3932static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3933 /* The cache test-and-clean instructions always return (1 << 30)
3934 * to indicate that there are no dirty cache lines.
3935 */
3936 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
7a0e58fa 3937 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3938 .resetvalue = (1 << 30) },
c4804214 3939 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
7a0e58fa 3940 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3941 .resetvalue = (1 << 30) },
c4804214
PM
3942};
3943
34f90529
PM
3944static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3945 /* Ignore ReadBuffer accesses */
3946 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3947 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
d4e6df63 3948 .access = PL1_RW, .resetvalue = 0,
7a0e58fa 3949 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
34f90529
PM
3950};
3951
731de9e6
EI
3952static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3953{
731de9e6 3954 unsigned int cur_el = arm_current_el(env);
731de9e6 3955
e6ef0169 3956 if (arm_is_el2_enabled(env) && cur_el == 1) {
731de9e6
EI
3957 return env->cp15.vpidr_el2;
3958 }
3959 return raw_read(env, ri);
3960}
3961
06a7e647 3962static uint64_t mpidr_read_val(CPUARMState *env)
81bdde9d 3963{
2fc0cc0e 3964 ARMCPU *cpu = env_archcpu(env);
eb5e1d3c
PF
3965 uint64_t mpidr = cpu->mp_affinity;
3966
81bdde9d 3967 if (arm_feature(env, ARM_FEATURE_V7MP)) {
78dbbbe4 3968 mpidr |= (1U << 31);
81bdde9d
PM
3969 /* Cores which are uniprocessor (non-coherent)
3970 * but still implement the MP extensions set
a8e81b31 3971 * bit 30 (for instance, Cortex-R5).
81bdde9d 3972 */
a8e81b31
PC
3973 if (cpu->mp_is_up) {
3974 mpidr |= (1u << 30);
3975 }
81bdde9d 3976 }
c4241c7d 3977 return mpidr;
81bdde9d
PM
3978}
3979
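/*
 * Worked example: a v7MP uniprocessor core (mp_is_up set) with affinity
 * 0 reads MPIDR as (1U << 31) | (1U << 30) = 0xc0000000, i.e. the
 * multiprocessing-extensions format flag plus the U (uniprocessor) bit.
 */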
06a7e647
EI
3980static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3981{
f0d574d6 3982 unsigned int cur_el = arm_current_el(env);
f0d574d6 3983
e6ef0169 3984 if (arm_is_el2_enabled(env) && cur_el == 1) {
f0d574d6
EI
3985 return env->cp15.vmpidr_el2;
3986 }
06a7e647
EI
3987 return mpidr_read_val(env);
3988}
3989
7ac681cf 3990static const ARMCPRegInfo lpae_cp_reginfo[] = {
a903c449 3991 /* NOP AMAIR0/1 */
b0fe2427
PM
3992 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
3993 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
84929218
RH
3994 .access = PL1_RW, .accessfn = access_tvm_trvm,
3995 .type = ARM_CP_CONST, .resetvalue = 0 },
b0fe2427 3996 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
7ac681cf 3997 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
84929218
RH
3998 .access = PL1_RW, .accessfn = access_tvm_trvm,
3999 .type = ARM_CP_CONST, .resetvalue = 0 },
891a2fe7 4000 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
01c097f7
FA
4001 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4002 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4003 offsetof(CPUARMState, cp15.par_ns)} },
891a2fe7 4004 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
84929218
RH
4005 .access = PL1_RW, .accessfn = access_tvm_trvm,
4006 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
7dd8c9af
FA
4007 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4008 offsetof(CPUARMState, cp15.ttbr0_ns) },
b061a82b 4009 .writefn = vmsa_ttbr_write, },
891a2fe7 4010 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
84929218
RH
4011 .access = PL1_RW, .accessfn = access_tvm_trvm,
4012 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
7dd8c9af
FA
4013 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4014 offsetof(CPUARMState, cp15.ttbr1_ns) },
b061a82b 4015 .writefn = vmsa_ttbr_write, },
7ac681cf
PM
4016};
4017
c4241c7d 4018static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
b0d2b7d0 4019{
c4241c7d 4020 return vfp_get_fpcr(env);
b0d2b7d0
PM
4021}
4022
c4241c7d
PM
4023static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4024 uint64_t value)
b0d2b7d0
PM
4025{
4026 vfp_set_fpcr(env, value);
b0d2b7d0
PM
4027}
4028
c4241c7d 4029static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
b0d2b7d0 4030{
c4241c7d 4031 return vfp_get_fpsr(env);
b0d2b7d0
PM
4032}
4033
c4241c7d
PM
4034static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4035 uint64_t value)
b0d2b7d0
PM
4036{
4037 vfp_set_fpsr(env, value);
b0d2b7d0
PM
4038}
4039
3f208fd7
PM
4040static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4041 bool isread)
c2b820fe 4042{
aaec1432 4043 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
c2b820fe
PM
4044 return CP_ACCESS_TRAP;
4045 }
4046 return CP_ACCESS_OK;
4047}
4048
4049static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4050 uint64_t value)
4051{
4052 env->daif = value & PSTATE_DAIF;
4053}
4054
220f508f
RH
4055static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4056{
4057 return env->pstate & PSTATE_PAN;
4058}
4059
4060static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4061 uint64_t value)
4062{
4063 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4064}
4065
4066static const ARMCPRegInfo pan_reginfo = {
4067 .name = "PAN", .state = ARM_CP_STATE_AA64,
4068 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4069 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4070 .readfn = aa64_pan_read, .writefn = aa64_pan_write
4071};
4072
9eeb7a1c
RH
4073static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4074{
4075 return env->pstate & PSTATE_UAO;
4076}
4077
4078static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4079 uint64_t value)
4080{
4081 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4082}
4083
4084static const ARMCPRegInfo uao_reginfo = {
4085 .name = "UAO", .state = ARM_CP_STATE_AA64,
4086 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4087 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4088 .readfn = aa64_uao_read, .writefn = aa64_uao_write
4089};
4090
dc8b1853
RC
4091static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4092{
4093 return env->pstate & PSTATE_DIT;
4094}
4095
4096static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4097 uint64_t value)
4098{
4099 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4100}
4101
4102static const ARMCPRegInfo dit_reginfo = {
4103 .name = "DIT", .state = ARM_CP_STATE_AA64,
4104 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4105 .type = ARM_CP_NO_RAW, .access = PL0_RW,
4106 .readfn = aa64_dit_read, .writefn = aa64_dit_write
4107};
4108
f2f68a78
RC
4109static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4110{
4111 return env->pstate & PSTATE_SSBS;
4112}
4113
4114static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4115 uint64_t value)
4116{
4117 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4118}
4119
4120static const ARMCPRegInfo ssbs_reginfo = {
4121 .name = "SSBS", .state = ARM_CP_STATE_AA64,
4122 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4123 .type = ARM_CP_NO_RAW, .access = PL0_RW,
4124 .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4125};
4126
38262d8a
RH
4127static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4128 const ARMCPRegInfo *ri,
4129 bool isread)
8af35c37 4130{
38262d8a
RH
4131 /* Cache invalidate/clean to Point of Coherency or Persistence... */
4132 switch (arm_current_el(env)) {
4133 case 0:
4134 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4135 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4136 return CP_ACCESS_TRAP;
4137 }
4138 /* fall through */
4139 case 1:
4140 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
4141 if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4142 return CP_ACCESS_TRAP_EL2;
4143 }
4144 break;
8af35c37
PM
4145 }
4146 return CP_ACCESS_OK;
4147}
4148
38262d8a 4149static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
1bed4d2e
RH
4150 const ARMCPRegInfo *ri,
4151 bool isread)
4152{
38262d8a 4153 /* Cache invalidate/clean to Point of Unification... */
1bed4d2e
RH
4154 switch (arm_current_el(env)) {
4155 case 0:
4156 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4157 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4158 return CP_ACCESS_TRAP;
4159 }
4160 /* fall through */
4161 case 1:
38262d8a
RH
4162 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
4163 if (arm_hcr_el2_eff(env) & HCR_TPU) {
1bed4d2e
RH
4164 return CP_ACCESS_TRAP_EL2;
4165 }
4166 break;
4167 }
4168 return CP_ACCESS_OK;
4169}
4170
dbb1fb27
AB
4171/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4172 * Page D4-1736 (DDI0487A.b)
4173 */
4174
b7e0730d
RH
4175static int vae1_tlbmask(CPUARMState *env)
4176{
e04a5752 4177 uint64_t hcr = arm_hcr_el2_eff(env);
bc944d3a 4178 uint16_t mask;
e04a5752
RDC
4179
4180 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
bc944d3a
RDC
4181 mask = ARMMMUIdxBit_E20_2 |
4182 ARMMMUIdxBit_E20_2_PAN |
4183 ARMMMUIdxBit_E20_0;
b7e0730d 4184 } else {
bc944d3a 4185 mask = ARMMMUIdxBit_E10_1 |
452ef8cb
RH
4186 ARMMMUIdxBit_E10_1_PAN |
4187 ARMMMUIdxBit_E10_0;
b7e0730d 4188 }
bc944d3a
RDC
4189
4190 if (arm_is_secure_below_el3(env)) {
4191 mask >>= ARM_MMU_IDX_A_NS;
4192 }
4193
4194 return mask;
b7e0730d
RH
4195}
4196
ea04dce7
RH
4197/* Return 56 if TBI is enabled, 64 otherwise. */
4198static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4199 uint64_t addr)
4200{
c1547bba 4201 uint64_t tcr = regime_tcr(env, mmu_idx);
ea04dce7
RH
4202 int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4203 int select = extract64(addr, 55, 1);
4204
4205 return (tbi >> select) & 1 ? 56 : 64;
4206}
4207
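/*
 * Worked example: address bit 55 selects the upper or lower VA range
 * and hence TBI1 or TBI0. For addr = 0xffff800000001000 (select == 1)
 * with TCR.TBI1 set this returns 56, so only VA bits [55:0] take part
 * in TLB matching; with TBI1 clear it returns 64.
 */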
4208static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4209{
b6ad6062 4210 uint64_t hcr = arm_hcr_el2_eff(env);
ea04dce7
RH
4211 ARMMMUIdx mmu_idx;
4212
4213 /* Only the regime of the mmu_idx below is significant. */
b6ad6062 4214 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
ea04dce7
RH
4215 mmu_idx = ARMMMUIdx_E20_0;
4216 } else {
4217 mmu_idx = ARMMMUIdx_E10_0;
4218 }
b6ad6062
RDC
4219
4220 if (arm_is_secure_below_el3(env)) {
4221 mmu_idx &= ~ARM_MMU_IDX_A_NS;
4222 }
4223
ea04dce7
RH
4224 return tlbbits_for_regime(env, mmu_idx, addr);
4225}
4226
fd3ed969
PM
4227static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4228 uint64_t value)
168aa23b 4229{
29a0af61 4230 CPUState *cs = env_cpu(env);
b7e0730d 4231 int mask = vae1_tlbmask(env);
dbb1fb27 4232
b7e0730d 4233 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
168aa23b
PM
4234}
4235
b4ab8ce9
PM
4236static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4237 uint64_t value)
4238{
29a0af61 4239 CPUState *cs = env_cpu(env);
b7e0730d 4240 int mask = vae1_tlbmask(env);
b4ab8ce9
PM
4241
4242 if (tlb_force_broadcast(env)) {
527db2be
RH
4243 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4244 } else {
4245 tlb_flush_by_mmuidx(cs, mask);
b4ab8ce9 4246 }
b4ab8ce9
PM
4247}
4248
90c19cdf 4249static int alle1_tlbmask(CPUARMState *env)
168aa23b 4250{
90c19cdf
RH
4251 /*
4252 * Note that the 'ALL' scope must invalidate both stage 1 and
fd3ed969
PM
4253 * stage 2 translations, whereas most other scopes only invalidate
4254 * stage 1 translations.
4255 */
fd3ed969 4256 if (arm_is_secure_below_el3(env)) {
452ef8cb
RH
4257 return ARMMMUIdxBit_SE10_1 |
4258 ARMMMUIdxBit_SE10_1_PAN |
4259 ARMMMUIdxBit_SE10_0;
fd3ed969 4260 } else {
452ef8cb
RH
4261 return ARMMMUIdxBit_E10_1 |
4262 ARMMMUIdxBit_E10_1_PAN |
4263 ARMMMUIdxBit_E10_0;
fd3ed969 4264 }
168aa23b
PM
4265}
4266
85d0dc9f
RH
4267static int e2_tlbmask(CPUARMState *env)
4268{
b6ad6062
RDC
4269 if (arm_is_secure_below_el3(env)) {
4270 return ARMMMUIdxBit_SE20_0 |
4271 ARMMMUIdxBit_SE20_2 |
4272 ARMMMUIdxBit_SE20_2_PAN |
4273 ARMMMUIdxBit_SE2;
4274 } else {
4275 return ARMMMUIdxBit_E20_0 |
4276 ARMMMUIdxBit_E20_2 |
4277 ARMMMUIdxBit_E20_2_PAN |
4278 ARMMMUIdxBit_E2;
4279 }
85d0dc9f
RH
4280}
4281
90c19cdf
RH
4282static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4283 uint64_t value)
4284{
4285 CPUState *cs = env_cpu(env);
4286 int mask = alle1_tlbmask(env);
4287
4288 tlb_flush_by_mmuidx(cs, mask);
4289}
4290
fd3ed969 4291static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
fa439fc5
PM
4292 uint64_t value)
4293{
85d0dc9f
RH
4294 CPUState *cs = env_cpu(env);
4295 int mask = e2_tlbmask(env);
fd3ed969 4296
85d0dc9f 4297 tlb_flush_by_mmuidx(cs, mask);
fd3ed969
PM
4298}
4299
43efaa33
PM
4300static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4301 uint64_t value)
4302{
2fc0cc0e 4303 ARMCPU *cpu = env_archcpu(env);
43efaa33
PM
4304 CPUState *cs = CPU(cpu);
4305
127b2b08 4306 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
43efaa33
PM
4307}
4308
fd3ed969
PM
4309static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4310 uint64_t value)
4311{
29a0af61 4312 CPUState *cs = env_cpu(env);
90c19cdf
RH
4313 int mask = alle1_tlbmask(env);
4314
4315 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
fa439fc5
PM
4316}
4317
2bfb9d75
PM
4318static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4319 uint64_t value)
4320{
29a0af61 4321 CPUState *cs = env_cpu(env);
85d0dc9f 4322 int mask = e2_tlbmask(env);
2bfb9d75 4323
85d0dc9f 4324 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
2bfb9d75
PM
4325}
4326
43efaa33
PM
4327static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4328 uint64_t value)
4329{
29a0af61 4330 CPUState *cs = env_cpu(env);
43efaa33 4331
127b2b08 4332 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
43efaa33
PM
4333}
4334
fd3ed969
PM
4335static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4336 uint64_t value)
fa439fc5 4337{
fd3ed969
PM
4338 /* Invalidate by VA, EL2
4339 * Currently handles both VAE2 and VALE2, since we don't support
4340 * flush-last-level-only.
4341 */
85d0dc9f
RH
4342 CPUState *cs = env_cpu(env);
4343 int mask = e2_tlbmask(env);
fd3ed969
PM
4344 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4345
85d0dc9f 4346 tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
fd3ed969
PM
4347}
4348
43efaa33
PM
4349static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4350 uint64_t value)
4351{
4352 /* Invalidate by VA, EL3
4353 * Currently handles both VAE3 and VALE3, since we don't support
4354 * flush-last-level-only.
4355 */
2fc0cc0e 4356 ARMCPU *cpu = env_archcpu(env);
43efaa33
PM
4357 CPUState *cs = CPU(cpu);
4358 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4359
127b2b08 4360 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
43efaa33
PM
4361}
4362
fd3ed969
PM
4363static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4364 uint64_t value)
4365{
90c19cdf
RH
4366 CPUState *cs = env_cpu(env);
4367 int mask = vae1_tlbmask(env);
fa439fc5 4368 uint64_t pageaddr = sextract64(value << 12, 0, 56);
ea04dce7 4369 int bits = vae1_tlbbits(env, pageaddr);
fa439fc5 4370
ea04dce7 4371 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
fa439fc5
PM
4372}
4373
b4ab8ce9
PM
4374static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4375 uint64_t value)
4376{
4377 /* Invalidate by VA, EL1&0 (AArch64 version).
4378 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4379 * since we don't support flush-for-specific-ASID-only or
4380 * flush-last-level-only.
4381 */
90c19cdf
RH
4382 CPUState *cs = env_cpu(env);
4383 int mask = vae1_tlbmask(env);
b4ab8ce9 4384 uint64_t pageaddr = sextract64(value << 12, 0, 56);
ea04dce7 4385 int bits = vae1_tlbbits(env, pageaddr);
b4ab8ce9
PM
4386
4387 if (tlb_force_broadcast(env)) {
ea04dce7 4388 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
527db2be 4389 } else {
ea04dce7 4390 tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
b4ab8ce9 4391 }
b4ab8ce9
PM
4392}
4393
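/*
 * tlb_force_broadcast() here reflects HCR_EL2.FB when running at EL1:
 * with FB set, the local invalidate above is upgraded to the
 * all-CPUs-synced (Inner Shareable) form, as the architecture requires
 * EL1 TLB maintenance to be broadcast in that case.
 */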
fd3ed969
PM
4394static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4395 uint64_t value)
fa439fc5 4396{
29a0af61 4397 CPUState *cs = env_cpu(env);
fd3ed969 4398 uint64_t pageaddr = sextract64(value << 12, 0, 56);
b6ad6062
RDC
4399 bool secure = arm_is_secure_below_el3(env);
4400 int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
eb849d8f 4401 int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
b6ad6062 4402 pageaddr);
fa439fc5 4403
b6ad6062 4404 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
fa439fc5
PM
4405}
4406
43efaa33
PM
4407static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4408 uint64_t value)
4409{
29a0af61 4410 CPUState *cs = env_cpu(env);
43efaa33 4411 uint64_t pageaddr = sextract64(value << 12, 0, 56);
ea04dce7 4412 int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
43efaa33 4413
ea04dce7
RH
4414 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4415 ARMMMUIdxBit_SE3, bits);
43efaa33
PM
4416}
4417
84940ed8 4418#ifdef TARGET_AARCH64
ab1cdb47
RH
4419typedef struct {
4420 uint64_t base;
84940ed8 4421 uint64_t length;
ab1cdb47
RH
4422} TLBIRange;
4423
4424static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
4425 uint64_t value)
4426{
4427 unsigned int page_size_granule, page_shift, num, scale, exponent;
3974ff93
RH
4428 /* Extract one bit to represent the va selector in use. */
4429 uint64_t select = sextract64(value, 36, 1);
4430 ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
ab1cdb47 4431 TLBIRange ret = { };
84940ed8 4432
84940ed8
RC
4433 page_size_granule = extract64(value, 46, 2);
4434
3974ff93
RH
4435 /* The granule encoded in value must match the granule in use. */
4436 if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
4437 qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
84940ed8 4438 page_size_granule);
ab1cdb47 4439 return ret;
84940ed8
RC
4440 }
4441
52a9f609 4442 page_shift = (page_size_granule - 1) * 2 + 12;
ab1cdb47
RH
4443 num = extract64(value, 39, 5);
4444 scale = extract64(value, 44, 2);
84940ed8 4445 exponent = (5 * scale) + 1;
84940ed8 4446
ab1cdb47 4447 ret.length = (num + 1) << (exponent + page_shift);
84940ed8 4448
3974ff93 4449 if (param.select) {
d976de21 4450 ret.base = sextract64(value, 0, 37);
84940ed8 4451 } else {
d976de21 4452 ret.base = extract64(value, 0, 37);
84940ed8 4453 }
ef56c242
RH
4454 if (param.ds) {
4455 /*
4456 * With DS=1, BaseADDR is always shifted 16 so that it is able
4457 * to address all 52 va bits. The input address is perforce
4458 * aligned on a 64k boundary regardless of translation granule.
4459 */
4460 page_shift = 16;
4461 }
d976de21 4462 ret.base <<= page_shift;
84940ed8 4463
ab1cdb47 4464 return ret;
84940ed8
RC
4465}
4466
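/*
 * Worked example of the decode above: for a 4K granule
 * (page_size_granule == 1) with NUM == 3 and SCALE == 2,
 *
 *   page_shift = (1 - 1) * 2 + 12      = 12
 *   exponent   = 5 * 2 + 1             = 11
 *   length     = (3 + 1) << (11 + 12)  = 32 MiB
 *
 * so the operation covers a 32 MiB range starting at BaseADDR.
 */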
4467static void do_rvae_write(CPUARMState *env, uint64_t value,
4468 int idxmap, bool synced)
4469{
4470 ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
ab1cdb47 4471 TLBIRange range;
84940ed8
RC
4472 int bits;
4473
ab1cdb47
RH
4474 range = tlbi_aa64_get_range(env, one_idx, value);
4475 bits = tlbbits_for_regime(env, one_idx, range.base);
84940ed8
RC
4476
4477 if (synced) {
4478 tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
ab1cdb47
RH
4479 range.base,
4480 range.length,
84940ed8
RC
4481 idxmap,
4482 bits);
4483 } else {
ab1cdb47
RH
4484 tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
4485 range.length, idxmap, bits);
84940ed8
RC
4486 }
4487}
4488
4489static void tlbi_aa64_rvae1_write(CPUARMState *env,
4490 const ARMCPRegInfo *ri,
4491 uint64_t value)
4492{
4493 /*
4494 * Invalidate by VA range, EL1&0.
4495 * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
4496 * since we don't support flush-for-specific-ASID-only or
4497 * flush-last-level-only.
4498 */
4499
4500 do_rvae_write(env, value, vae1_tlbmask(env),
4501 tlb_force_broadcast(env));
4502}
4503
4504static void tlbi_aa64_rvae1is_write(CPUARMState *env,
4505 const ARMCPRegInfo *ri,
4506 uint64_t value)
4507{
4508 /*
4509 * Invalidate by VA range, Inner/Outer Shareable EL1&0.
4510 * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
4511 * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
4512 * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
4513 * shareable specific flushes.
4514 */
4515
4516 do_rvae_write(env, value, vae1_tlbmask(env), true);
4517}
4518
4519static int vae2_tlbmask(CPUARMState *env)
4520{
4521 return (arm_is_secure_below_el3(env)
4522 ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
4523}
4524
4525static void tlbi_aa64_rvae2_write(CPUARMState *env,
4526 const ARMCPRegInfo *ri,
4527 uint64_t value)
4528{
4529 /*
4530 * Invalidate by VA range, EL2.
4531 * Currently handles all of RVAE2 and RVALE2,
4532 * since we don't support flush-for-specific-ASID-only or
4533 * flush-last-level-only.
4534 */
4535
4536 do_rvae_write(env, value, vae2_tlbmask(env),
4537 tlb_force_broadcast(env));
4538
4540}
4541
4542static void tlbi_aa64_rvae2is_write(CPUARMState *env,
4543 const ARMCPRegInfo *ri,
4544 uint64_t value)
4545{
4546 /*
4547 * Invalidate by VA range, Inner/Outer Shareable, EL2.
4548 * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
4549 * since we don't support flush-for-specific-ASID-only,
4550 * flush-last-level-only or inner/outer shareable specific flushes.
4551 */
4552
4553 do_rvae_write(env, value, vae2_tlbmask(env), true);
4554
4555}
4556
4557static void tlbi_aa64_rvae3_write(CPUARMState *env,
4558 const ARMCPRegInfo *ri,
4559 uint64_t value)
4560{
4561 /*
4562 * Invalidate by VA range, EL3.
4563 * Currently handles all of RVAE3 and RVALE3,
4564 * since we don't support flush-for-specific-ASID-only or
4565 * flush-last-level-only.
4566 */
4567
4568 do_rvae_write(env, value, ARMMMUIdxBit_SE3,
4569 tlb_force_broadcast(env));
4570}
4571
4572static void tlbi_aa64_rvae3is_write(CPUARMState *env,
4573 const ARMCPRegInfo *ri,
4574 uint64_t value)
4575{
4576 /*
4577 * Invalidate by VA range, EL3, Inner/Outer Shareable.
4578 * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
4579 * since we don't support flush-for-specific-ASID-only,
4580 * flush-last-level-only or inner/outer specific flushes.
4581 */
4582
4583 do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
4584}
4585#endif

static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
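
/*
 * Summarising the checks above: when EL2 is the host (HCR_EL2.{E2H,TGE}
 * both set), an EL0 DC ZVA is gated only by SCTLR_EL2.DZE; otherwise
 * SCTLR_EL1.DZE is checked first and then HCR_EL2.TDZ, so the EL1 trap
 * takes priority over the EL2 trap. At EL1 only HCR_EL2.TDZ applies.
 */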

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
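
/*
 * DCZID_EL0 packs the block size, as log2 of the number of words zeroed
 * by DC ZVA, into bits [3:0] (cpu->dcz_blocksize here) and DZP into
 * bit 4. A guest typically recovers the block size in bytes as
 *
 *     blocksize = 4 << (dczid & 0xf);
 *
 * and must see DZP == 0 before relying on DC ZVA.
 */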

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented. */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /*
     * Some MDCR_EL3 bits affect whether PMU counters are running:
     * if we are trying to change any of those then we must
     * bracket this update with PMU start/finish calls.
     */
    bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;

    if (pmu_op) {
        pmu_op_start(env);
    }
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
    if (pmu_op) {
        pmu_op_finish(env);
    }
}

static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * Some MDCR_EL2 bits affect whether PMU counters are running:
     * if we are trying to change any of those then we must
     * bracket this update with PMU start/finish calls.
     */
    bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;

    if (pmu_op) {
        pmu_op_start(env);
    }
    env->cp15.mdcr_el2 = value;
    if (pmu_op) {
        pmu_op_finish(env);
    }
}
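
/*
 * Per the comments above, the pmu_op_start()/pmu_op_finish() bracket
 * lets the counters account for time under the old MDCR value before
 * the new enable bits take effect, so enabling or disabling a counter
 * neither loses nor double-counts events.
 */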

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};

static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= HCR_TERR | HCR_TEA;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= HCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_fwb, cpu)) {
            valid_mask |= HCR_FWB;
        }
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     * HCR_FWB changes the interpretation of stage2 descriptor bits
     */
    if ((env->cp15.hcr_el2 ^ value) &
        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    arm_cpu_update_vserr(cpu);
}

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}
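
/*
 * In the two half-register writes above, deposit64() splices the 32
 * written bits into the existing 64-bit HCR_EL2 value, while the
 * valid_mask passed to do_hcr_write() pre-marks the *other* half as
 * valid so that only the newly written half is filtered against the
 * feature-dependent RES0 mask.
 */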

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (!arm_is_el2_enabled(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

/*
 * Corresponds to ARM pseudocode function ELIsInHost().
 */
bool el_is_in_host(CPUARMState *env, int el)
{
    uint64_t mask;

    /*
     * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
     * Perform the simplest bit tests first, and validate EL2 afterward.
     */
    if (el & 1) {
        return false; /* EL1 or EL3 */
    }

    /*
     * Note that hcr_write() checks isar_feature_aa64_vh(),
     * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
     */
    mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
    if ((env->cp15.hcr_el2 & mask) != mask) {
        return false;
    }

    /* TGE and/or E2H set: double check those bits are currently legal. */
    return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
}
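
/*
 * In other words: EL2 is "in host" whenever HCR_EL2.E2H is set and EL2
 * is enabled and using AArch64, while EL0 additionally requires
 * HCR_EL2.TGE; EL1 and EL3 can never be in host, which is what the
 * (el & 1) test rejects up front.
 */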

static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    uint64_t valid_mask = 0;

    /* No features adding bits to HCRX are implemented. */

    /* Clear RES0 bits. */
    env->cp15.hcrx_el2 = value & valid_mask;
}

static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};

/* Return the effective value of HCRX_EL2. */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
    /*
     * The bits in this register behave as 0 for all purposes other than
     * direct reads of the register if:
     *   - EL2 is not enabled in the current security state,
     *   - SCR_EL3.HXEn is 0.
     */
    if (!arm_is_el2_enabled(env)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && !(env->cp15.scr_el3 & SCR_HXEN))) {
        return 0;
    }
    return env->cp15.hcrx_el2;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
    }
    return value;
}

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
};

static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
};

#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state. */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}

static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}

static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}

static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
{
    struct E2HAlias {
        uint32_t src_key, dst_key, new_key;
        const char *src_name, *dst_name, *new_name;
        bool (*feature)(const ARMISARegisters *id);
    };

#define K(op0, op1, crn, crm, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)

    static const struct E2HAlias aliases[] = {
        { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
        { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
          "CPACR", "CPTR_EL2", "CPACR_EL12" },
        { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
        { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
        { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
        { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
        { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
        { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
        { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
        { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
        { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
          "VBAR", "VBAR_EL2", "VBAR_EL12" },
        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },

        /*
         * Note that redirection of ZCR is mentioned in the description
         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
         * not in the summary table.
         */
        { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
        { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
          "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },

        { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },

        { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
          "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
          isar_feature_aa64_scxtnum },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
        bool ok;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs,
                                      (gpointer)(uintptr_t)a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs,
                                      (gpointer)(uintptr_t)a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys. */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will. */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data. */
        new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));

        new_reg->name = a->new_name;
        new_reg->type |= ARM_CP_ALIAS;
        /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
        new_reg->access &= PL2_RW | PL3_RW;

        ok = g_hash_table_insert(cpu->cp_regs,
                                 (gpointer)(uintptr_t)a->new_key, new_reg);
        g_assert(ok);

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
5888#endif
5889
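/*
 * For illustration only -- a hypothetical lookup (not part of QEMU)
 * showing how the K() keys above behave: each (op0, op1, crn, crm, op2)
 * tuple is hashed to the ARMCPRegInfo registered under that encoding,
 * so the freshly inserted SCTLR_EL12 alias can be found again like so.
 */
static inline ARMCPRegInfo *example_lookup_sctlr_el12(ARMCPU *cpu)
{
    uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                      1, 0, 3, 5, 0); /* SCTLR_EL12 */
    return g_hash_table_lookup(cpu->cp_regs, (gpointer)(uintptr_t)key);
}
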
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

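/*
 * For illustration only -- a hypothetical predicate (not part of QEMU)
 * restating the common case gated above: ignoring the HCR_EL2.TID2
 * traps, an EL0 read of CTR_EL0 is permitted only when the SCTLR.UCT
 * bit of the regime that owns EL0 is set.
 */
static inline bool example_el0_ctr_read_ok(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool hosted = (hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);

    return env->cp15.sctlr_el[hosted ? 2 : 1] & SCTLR_UCT;
}
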
/*
 * Check for traps to RAS registers, which are controlled
 * by HCR_EL2.TERR and SCR_EL3.TERR.
 */
static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        return env->cp15.vdisr_el2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return 0; /* RAZ/WI */
    }
    return env->cp15.disr_el1;
}

static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
        env->cp15.vdisr_el2 = val;
        return;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
        return; /* RAZ/WI */
    }
    env->cp15.disr_el1 = val;
}

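/*
 * Illustrative summary (not from the original source) of the routing
 * implemented above, as seen by software below EL2/EL3:
 *   HCR_EL2.AMO set  -> DISR_EL1 accesses are redirected to VDISR_EL2
 *   SCR_EL3.EA set   -> DISR_EL1 reads as zero and ignores writes
 *   otherwise        -> DISR_EL1 accesses the real register
 */
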
/*
 * Minimal RAS implementation with no Error Records.
 * Which means that all of the Error Record registers:
 *   ERXADDR_EL1
 *   ERXCTLR_EL1
 *   ERXFR_EL1
 *   ERXMISC0_EL1
 *   ERXMISC1_EL1
 *   ERXMISC2_EL1
 *   ERXMISC3_EL1
 *   ERXPFGCDN_EL1  (RASv1p1)
 *   ERXPFGCTL_EL1  (RASv1p1)
 *   ERXPFGF_EL1    (RASv1p1)
 *   ERXSTATUS_EL1
 * and
 *   ERRSELR_EL1
 * may generate UNDEFINED, which is the effect we get by not
 * listing them at all.
 */
static const ARMCPRegInfo minimal_ras_reginfo[] = {
    { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
      .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
    { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL1_R, .accessfn = access_terr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
    { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
};

/*
 * Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  This excludes the check for whether the exception
 * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
 * be found by testing 0 < fp_exception_el < sve_exception_el.
 *
 * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
 * pseudocode does *not* separate out the FP trap checks, but has them
 * all in one function.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1 && !el_is_in_host(env, el)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 1;
        }
    }

    if (el <= 2 && arm_is_el2_enabled(env)) {
        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
        if (env->cp15.hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
            case 1:
                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3. Since EZ is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
        return 3;
    }
#endif
    return 0;
}

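/*
 * For illustration only -- a hypothetical caller (not part of QEMU)
 * showing how the result is typically consumed: a nonzero return is
 * the exception level that should take the SVE access trap, while
 * zero means SVE is usable at the given EL.
 */
static inline bool example_sve_enabled_here(CPUARMState *env)
{
    return sve_exception_el(env, arm_current_el(env)) == 0;
}
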
/*
 * Return the exception level to which exceptions should be taken for SME.
 * C.f. the ARM pseudocode function CheckSMEAccess.
 */
int sme_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1 && !el_is_in_host(env, el)) {
        switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
        case 1:
            if (el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            return 1;
        }
    }

    if (el <= 2 && arm_is_el2_enabled(env)) {
        /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
        if (env->cp15.hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
            case 1:
                if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3. Since ESM is negative we must check for EL3. */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return 3;
    }
#endif
    return 0;
}

/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
static bool sme_fa64(CPUARMState *env, int el)
{
    if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
        return false;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
            return false;
        }
    }
    if (el <= 2 && arm_is_el2_enabled(env)) {
        if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
            return false;
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
            return false;
        }
    }

    return true;
}

/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t *cr = env->vfp.zcr_el;
    uint32_t map = cpu->sve_vq.map;
    uint32_t len = ARM_MAX_VQ - 1;

    if (sm) {
        cr = env->vfp.smcr_el;
        map = cpu->sme_vq.map;
    }

    if (el <= 1 && !el_is_in_host(env, el)) {
        len = MIN(len, 0xf & (uint32_t)cr[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        len = MIN(len, 0xf & (uint32_t)cr[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        len = MIN(len, 0xf & (uint32_t)cr[3]);
    }

    map &= MAKE_64BIT_MASK(0, len + 1);
    if (map != 0) {
        return 31 - clz32(map);
    }

    /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
    assert(sm);
    return ctz32(cpu->sme_vq.map);
}

uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
{
    return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
}

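/*
 * For illustration only (not part of QEMU): the functions above return
 * the vector quanta minus one, where one quantum is 128 bits, so the
 * effective vector length in bytes is (vqm1 + 1) * 16.  E.g. a return
 * value of 3 means VL = 4 * 128 bits = 64 bytes.
 */
static inline uint32_t example_sve_vl_bytes(CPUARMState *env, int el)
{
    return (sve_vqm1_for_el(env, el) + 1) * 16;
}
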
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_vqm1_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

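/*
 * Illustrative note (not from the original source): only a shrinking
 * vector length needs immediate cleanup.  aarch64_sve_narrow_vq zeroes
 * the portions of the Z/P registers that become inaccessible, so that
 * a later widening of the VL exposes zeros rather than stale data.
 */
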
static const ARMCPRegInfo zcr_reginfo[] = {
    { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
      .writefn = zcr_write, .raw_writefn = raw_write },
    { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_SVE,
      .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
      .writefn = zcr_write, .raw_writefn = raw_write },
};

#ifdef TARGET_AARCH64
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnTP2)) {
            return CP_ACCESS_TRAP;
        }
    }
    /* TODO: FEAT_FGT */
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENTP2)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */
    if (arm_current_el(env) < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
    helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
    arm_rebuild_hflags(env);
}

static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_vqm1_for_el(env, cur_el);
    int new_len;

    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
    value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
    raw_write(env, ri, value);

    /*
     * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
     * when SVL is widened (old values kept, or zeros).  Choose to keep the
     * current values for simplicity.  But for QEMU internals, we must still
     * apply the narrower SVL to the Zregs and Pregs -- see the comment
     * above aarch64_sve_narrow_vq.
     */
    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo sme_reginfo[] = {
    { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
      .access = PL0_RW, .accessfn = access_tpidr2,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
    { .name = "SVCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, svcr),
      .writefn = svcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL1_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL2_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
      .access = PL3_RW, .type = ARM_CP_SME,
      .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
      .writefn = smcr_write, .raw_writefn = raw_write },
    { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
      .access = PL1_R, .accessfn = access_aa64_tid1,
      /*
       * IMPLEMENTOR = 0 (software)
       * REVISION = 0 (implementation defined)
       * SMPS = 0 (no streaming execution priority in QEMU)
       * AFFINITY = 0 (streaming SVE mode not shared with other PEs)
       */
      .type = ARM_CP_CONST, .resetvalue = 0, },
    /*
     * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
     */
    { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_esm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
      .access = PL2_RW, .accessfn = access_esm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};
#endif /* TARGET_AARCH64 */

static void define_pmu_regs(ARMCPU *cpu)
{
    /*
     * v7 performance monitor control register: same implementor
     * field as main ID register, and we implement a configurable
     * number of event counters in addition to the cycle count
     * register.
     */
    unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
    ARMCPRegInfo pmcr = {
        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
        .access = PL0_RW,
        .type = ARM_CP_IO | ARM_CP_ALIAS,
        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
        .accessfn = pmreg_access, .writefn = pmcr_write,
        .raw_writefn = raw_write,
    };
    ARMCPRegInfo pmcr64 = {
        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
        .access = PL0_RW, .accessfn = pmreg_access,
        .type = ARM_CP_IO,
        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
        .resetvalue = cpu->isar.reset_pmcr_el0,
        .writefn = pmcr_write, .raw_writefn = raw_write,
    };

    define_one_arm_cp_reg(cpu, &pmcr);
    define_one_arm_cp_reg(cpu, &pmcr64);
    for (i = 0; i < pmcrn; i++) {
        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
        ARMCPRegInfo pmev_regs[] = {
            { .name = pmevcntr_name, .cp = 15, .crn = 14,
              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .accessfn = pmreg_access_xevcntr },
            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
              .type = ARM_CP_IO,
              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
              .raw_readfn = pmevcntr_rawread,
              .raw_writefn = pmevcntr_rawwrite },
            { .name = pmevtyper_name, .cp = 15, .crn = 14,
              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .accessfn = pmreg_access },
            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
              .type = ARM_CP_IO,
              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
              .raw_writefn = pmevtyper_rawwrite },
        };
        define_arm_cp_regs(cpu, pmev_regs);
        g_free(pmevcntr_name);
        g_free(pmevcntr_el0_name);
        g_free(pmevtyper_name);
        g_free(pmevtyper_el0_name);
    }
    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
        static const ARMCPRegInfo v84_pmmir = {
            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &v84_pmmir);
    }
}

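/*
 * For illustration only (not part of QEMU): the crm/opc2 arithmetic in
 * the loop above implements the architected PMEVCNTR<n>/PMEVTYPER<n>
 * encodings, where crm carries n<4:3> and opc2 carries n<2:0>.  For
 * example, counter 10 encodes as crm = 9, opc2 = 2.
 */
static inline void example_pmevcntr_encoding(unsigned n,
                                             unsigned *crm, unsigned *opc2)
{
    *crm = 8 | (3 & (n >> 3));  /* 8..11 for PMEVCNTR<n> */
    *opc2 = n & 7;
}
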
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/
 * ID_AA64PFR0_EL1 at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr1 = cpu->isar.id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

#ifndef CONFIG_USER_ONLY
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
#endif

/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state exclusion has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env,
                                    const ARMCPRegInfo *ri, bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode. */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env, ri, isread);
}

/*
 * A trivial implementation of ARMv8.1-LOR leaves all of these
 * registers fixed at 0, which indicates that there are zero
 * supported Limited Ordering regions.
 */
static const ARMCPRegInfo lor_reginfo[] = {
    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_lor_other,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
      .access = PL1_R, .accessfn = access_lor_ns,
      .type = ARM_CP_CONST, .resetvalue = 0 },
};

#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_is_el2_enabled(env) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
};

static const ARMCPRegInfo tlbirange_reginfo[] = {
    { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1is_write },
    { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae1_write },
    { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2is_write },
    { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_rvae2_write },
    { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3is_write },
    { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
    { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_rvae3_write },
};

static const ARMCPRegInfo tlbios_reginfo[] = {
    { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000. */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest.  There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}

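/*
 * Illustrative note (not from the original source): in this CPU model
 * the Z flag is considered set exactly when env->ZF == 0.  That is why
 * success stores ZF = 1 (Z clear, NZCV = 0000) while failure stores
 * ZF = 0 (Z set, NZCV = 0100, the "no valid number" indication).
 */
static inline bool example_rndr_failed(CPUARMState *env)
{
    return env->ZF == 0; /* Z set <=> the last RNDR/RNDRRS read failed */
}
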
/* We do not support re-seeding, so the two registers operate the same. */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
};

#ifndef CONFIG_USER_ONLY
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
                           uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* CTR_EL0 System register -> DminLine, bits [19:16] */
    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
    uint64_t vaddr_in = (uint64_t) value;
    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
    void *haddr;
    int mem_idx = cpu_mmu_index(env, false);

    /* This won't be crossing page boundaries */
    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
    if (haddr) {

        ram_addr_t offset;
        MemoryRegion *mr;

        /* RCU lock is already being held */
        mr = memory_region_from_host(haddr, &offset);

        if (mr) {
            memory_region_writeback(mr, offset, dline_size);
        }
    }
}

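/*
 * For illustration only (not part of QEMU): CTR_EL0.DminLine is the
 * log2 of the number of 4-byte words in the smallest data cache line,
 * so the line size in bytes is 4 << DminLine.  With the common value
 * DminLine = 4 this gives 4 << 4 = 64 bytes, and vaddr above is just
 * the input address rounded down to a 64-byte boundary.
 */
static inline uint64_t example_dcache_line_bytes(uint64_t ctr_el0)
{
    return 4 << ((ctr_el0 >> 16) & 0xF); /* DminLine at bits [19:16] */
}
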
static const ARMCPRegInfo dcpop_reg[] = {
    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};

static const ARMCPRegInfo dcpodp_reg[] = {
    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
};
#endif /*CONFIG_USER_ONLY*/

static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_ATA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_TCO;
}

static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
}

static const ARMCPRegInfo mte_reginfo[] = {
    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
      .access = PL1_RW, .accessfn = access_mte,
      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
      .access = PL1_R, .accessfn = access_aa64_tid5,
      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL1_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
};

static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
    { .name = "TCO", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
      .type = ARM_CP_CONST, .access = PL0_RW, },
};

static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
      .access = PL0_W, .type = ARM_CP_DC_GVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
};

static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    int el = arm_current_el(env);

    if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
        if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
            if (hcr & HCR_TGE) {
                return CP_ACCESS_TRAP_EL2;
            }
            return CP_ACCESS_TRAP;
        }
    } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo scxtnum_reginfo[] = {
    { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL0_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
    { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL1_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
    { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL2_RW, .accessfn = access_scxtnum,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
    { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
};
#endif /* TARGET_AARCH64 */

static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
};

static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}

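/*
 * Illustrative note (not from the original source): extract64(x, 32, 32)
 * is simply the high word of x, so CCSIDR2 reads as bits [63:32] of the
 * currently selected 64-bit CCSIDR value.
 */
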
static const ARMCPRegInfo ccsidr2_reginfo[] = {
    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
};

static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_joscr_jmcr(CPUARMState *env,
                                        const ARMCPRegInfo *ri, bool isread)
{
    /*
     * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
     * in v7A, not in v8A.
     */
    if (!arm_feature(env, ARM_FEATURE_V8) &&
        arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TJDBX)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .accessfn = access_joscr_jmcr,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
};

static const ARMCPRegInfo contextidr_el2 = {
    .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
    .access = PL2_RW,
    .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
};

static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
#ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = e2h_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
};

#ifndef CONFIG_USER_ONLY
static const ARMCPRegInfo ats1e1_reginfo[] = {
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
};

static const ARMCPRegInfo ats1cp_reginfo[] = {
    { .name = "ATS1CPRP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    { .name = "ATS1CPWP",
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
};
#endif

/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which never happens for ARMv7, is optional for
 * ARMv8, and is mandatory for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_tacr,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
};
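
/*
 * For reference, a sketch of the feature gate these definitions rely on:
 * registration (in register_cp_regs_for_features() below) is conditional
 * on cpu_isar_feature(aa32_ac2, cpu), which in QEMU's cpu.h boils down to
 * testing the ID_MMFR4.AC2 field, roughly:
 *
 *   static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id)
 *   {
 *       return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0;
 *   }
 */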

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .accessfn = access_aa32_tid3,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST,
            .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * v8 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         * ID registers which are AArch64 views of the AArch32 ID registers
         * which already existed in v6 and v7 are handled elsewhere,
         * in v6_idregs[].
         */
        int i;
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
             * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64zfr0 },
            { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64smfr0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_aa64mmfr2 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.mvfr2 },
            /*
             * "0, c0, c3, {0,1,2}" are the encodings corresponding to
             * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
             * as RAZ, since it is in the "reserved for future ID
             * registers, RAZ" part of the AArch32 encoding space.
             */
            { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            /*
             * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
             * they're also RAZ for AArch64, and in v8 the reserved slots
             * are gradually being filled with the AArch64 view of the
             * corresponding new AArch32 ID registers.
             */
            { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_pfr2 },
            { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_dfr1 },
            { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = cpu->isar.id_mmfr5 },
            { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
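        /*
         * Reading the masks above: exported_bits is the set of ID fields
         * that user-only emulation passes through to the guest (bits
         * outside the mask read as zero), while fixed_bits forces fields
         * on. For example, ID_AA64PFR0_EL1's fixed_bits of 0x11 pins the
         * EL0 and EL1 fields (bits [3:0] and [7:4]) to 1, i.e. "AArch64
         * supported", regardless of what the CPU model advertises.
         */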
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL1_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);

        for (i = 4; i < 16; i++) {
            /*
             * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
             * For pre-v8 cores there are RAZ patterns for these in
             * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
             * v8 extends the "must RAZ" part of the ID register space
             * to also cover c0, 0, c{8-15}, {0-7}.
             * These are STATE_AA32 because in the AArch64 sysreg space
             * c4-c7 is where the AArch64 ID registers live (and we've
             * already defined those in v8_idregs[]), and c8-c15 are not
             * "must RAZ" for AArch64.
             */
            g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
            ARMCPRegInfo v8_aa32_raz_idregs = {
                .name = name,
                .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
                .access = PL1_R, .type = ARM_CP_CONST,
                .accessfn = access_aa64_tid3,
                .resetvalue = 0 };
            define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
        }
    }

    /*
     * Register the base EL2 cpregs.
     * Pre v8, these registers are implemented only as part of the
     * Virtualization Extensions (EL2 present). Beginning with v8,
     * if EL2 is missing but EL3 is enabled, mostly these become
     * RES0 from EL3, with some specific exceptions.
     */
    if (arm_feature(env, ARM_FEATURE_EL2)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && arm_feature(env, ARM_FEATURE_V8))) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def,
              .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .resetvalue = vmpidr_def,
              .type = ARM_CP_EL3_NO_EL2_C_NZ,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
        };
        /*
         * The only field of MDCR_EL2 that has a defined architectural reset
         * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
         */
        ARMCPRegInfo mdcr_el2 = {
            .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
            .writefn = mdcr_el2_write,
            .access = PL2_RW, .resetvalue = pmu_num_counters(env),
            .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
        };
        define_one_arm_cp_reg(cpu, &mdcr_el2);
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .access = PL2_R,
                .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    }

    /* Register the base EL3 cpregs. */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .access = PL3_R,
              .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
            },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            static const ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (i.e. a write causes an UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        static const ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            size_t i;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
                id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
            }
            for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
                id_cp_reginfo[i].access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
        };
#ifdef CONFIG_USER_ONLY
        static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_tacr,
              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
         * There are two flavours:
         * (1) older 32-bit only cores have a simple 32-bit CBAR
         * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
         *     32-bit register visible to AArch32 at a different encoding
         *     to the "flavour 1" register and with the bits rearranged to
         *     be able to squash a 64-bit address into the 32-bit view.
         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
         * in future if we support AArch32-only configs of some of the
         * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
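            /*
             * Worked example of the squash above: with a reset_cbar of
             * 0x2_8004_0000, extract64(..., 18, 14) << 18 recovers bits
             * [31:18] (0x80040000) and extract64(..., 32, 12) places bits
             * [43:32] (0x002) in the low 12 bits, giving a 32-bit view
             * of 0x80040002.
             */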
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }

    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        static const ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW, .accessfn = access_tvm_trvm,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }

    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        define_one_arm_cp_reg(cpu, &ssbs_reginfo);
    }
    if (cpu_isar_feature(any_ras, cpu)) {
        define_arm_cp_regs(cpu, minimal_ras_reginfo);
    }

    if (cpu_isar_feature(aa64_vh, cpu) ||
        cpu_isar_feature(aa64_debugv8p2, cpu)) {
        define_one_arm_cp_reg(cpu, &contextidr_el2);
    }
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_arm_cp_regs(cpu, zcr_reginfo);
    }

    if (cpu_isar_feature(aa64_hcx, cpu)) {
        define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_sme, cpu)) {
        define_arm_cp_regs(cpu, sme_reginfo);
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbirange, cpu)) {
        define_arm_cp_regs(cpu, tlbirange_reginfo);
    }
    if (cpu_isar_feature(aa64_tlbios, cpu)) {
        define_arm_cp_regs(cpu, tlbios_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }

    if (cpu_isar_feature(aa64_scxtnum, cpu)) {
        define_arm_cp_regs(cpu, scxtnum_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUClass *cc = CPU_CLASS(oc);
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    if (cc->deprecation_note) {
        qemu_printf("  %s (deprecated)\n", name);
    } else {
        qemu_printf("  %s\n", name);
    }
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}
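
/*
 * For orientation: arm_cpu_list() is the routine behind "-cpu help" on
 * Arm targets, so with the printing above the output has the shape
 * (exact names depend on the build and CPU models):
 *
 *   Available CPUs:
 *     cortex-a15
 *     cortex-a7
 *     ...
 */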

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
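
/*
 * The QMP view of the same list: a "query-cpu-definitions" command
 * returns entries built by arm_cpu_add_definition() above, roughly
 * (abridged; the exact fields come from the QAPI schema, where
 * q_typename is exposed as "typename"):
 *
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "cortex-a15",
 *                      "typename": "cortex-a15-arm-cpu" }, ... ] }
 */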

/*
 * Private utility function for define_one_arm_cp_reg_with_opaque():
 * add a single reginfo struct to the hash table.
 */
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, CPState state,
                                   CPSecureState secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    CPUARMState *env = &cpu->env;
    uint32_t key;
    ARMCPRegInfo *r2;
    bool is64 = r->type & ARM_CP_64BIT;
    bool ns = secstate & ARM_CP_SECSTATE_NS;
    int cp = r->cp;
    size_t name_len;
    bool make_const;

    switch (state) {
    case ARM_CP_STATE_AA32:
        /* We assume it is a cp15 register if the .cp field is left unset. */
        if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
            cp = 15;
        }
        key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
        break;
    case ARM_CP_STATE_AA64:
        /*
         * To allow abbreviation of ARMCPRegInfo definitions, we treat
         * cp == 0 as equivalent to the value for "standard guest-visible
         * sysreg". STATE_BOTH definitions are also always "standard sysreg"
         * in their AArch64 view (the .cp value may be non-zero for the
         * benefit of the AArch32 view).
         */
        if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            cp = CP_REG_ARM64_SYSREG_CP;
        }
        key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
        break;
    default:
        g_assert_not_reached();
    }

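    /*
     * (The ENCODE_* macros above just pack the coprocessor/sysreg
     * coordinates into disjoint bit fields of one integer, so that the
     * tuple (cp, is64, ns, crn, crm, opc1, opc2) -- or its AArch64
     * equivalent -- identifies a register uniquely in the cp_regs hash
     * table; see the ENCODE_CP_REG/ENCODE_AA64_CP_REG definitions for
     * the exact bit layout.)
     */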
    /* Overriding of an existing definition must be explicitly requested. */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
        if (oldreg) {
            assert(oldreg->type & ARM_CP_OVERRIDE);
        }
    }

    /*
     * Eliminate registers that are not present because the EL is missing.
     * Doing this here makes it easier to put all registers for a given
     * feature into the same ARMCPRegInfo array and define them all at once.
     */
    make_const = false;
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        /*
         * An EL2 register without EL2 but with EL3 is (usually) RES0.
         * See rule RJFFP in section D1.1.3 of DDI0487H.a.
         */
        int min_el = ctz32(r->access) / 2;
        if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
            if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
                return;
            }
            make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
        }
    } else {
        CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
                                 ? PL2_RW : PL1_RW);
        if ((r->access & max_el) == 0) {
            return;
        }
    }

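    /*
     * A note on the min_el computation above: QEMU's PLx access rights
     * are laid out as one read/write bit pair per EL starting at bit 0,
     * so the lowest set bit of r->access, divided by 2, is the lowest EL
     * that can see the register. For example, a register whose only
     * rights are PL2_RW has its first set bit at bit 4, giving
     * min_el == 2 (see the PLx_R/PLx_W definitions for the encoding).
     */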
    /* Combine cpreg and name into one allocation. */
    name_len = strlen(name) + 1;
    r2 = g_malloc(sizeof(*r2) + name_len);
    *r2 = *r;
    r2->name = memcpy(r2 + 1, name, name_len);

    /*
     * Update fields to match the instantiation, overwriting wildcards
     * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
     */
    r2->cp = cp;
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    r2->state = state;
    r2->secure = secstate;
    if (opaque) {
        r2->opaque = opaque;
    }

    if (make_const) {
        /* This should not have been a very special register to begin with. */
        int old_special = r2->type & ARM_CP_SPECIAL_MASK;
        assert(old_special == 0 || old_special == ARM_CP_NOP);
        /*
         * Set the special function to CONST, retaining the other flags.
         * This is important for e.g. ARM_CP_SVE so that we still
         * take the SVE trap if CPTR_EL3.EZ == 0.
         */
        r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
        /*
         * Usually, these registers become RES0, but there are a few
         * special cases like VPIDR_EL2 which have a constant non-zero
         * value with writes ignored.
         */
        if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
            r2->resetvalue = 0;
        }
        /*
         * ARM_CP_CONST has precedence, so removing the callbacks and
         * offsets is not strictly necessary, but it is potentially
         * less confusing to debug later.
         */
        r2->readfn = NULL;
        r2->writefn = NULL;
        r2->raw_readfn = NULL;
        r2->raw_writefn = NULL;
        r2->resetfn = NULL;
        r2->fieldoffset = 0;
        r2->bank_fieldoffsets[0] = 0;
        r2->bank_fieldoffsets[1] = 0;
    } else {
        bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];

        if (isbanked) {
            /*
             * Register is banked (using both entries in array).
             * Overwriting fieldoffset as the array is only used to define
             * banked registers but later only fieldoffset is used.
             */
            r2->fieldoffset = r->bank_fieldoffsets[ns];
        }
        if (state == ARM_CP_STATE_AA32) {
            if (isbanked) {
                /*
                 * If the register is banked then we don't need to migrate or
                 * reset the 32-bit instance in certain cases:
                 *
                 * 1) If the register has both 32-bit and 64-bit instances
                 *    then we can count on the 64-bit instance taking care
                 *    of the non-secure bank.
                 * 2) If ARMv8 is enabled then we can count on a 64-bit
                 *    version taking care of the secure bank. This requires
                 *    that separate 32 and 64-bit definitions are provided.
                 */
                if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                    (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
                    r2->type |= ARM_CP_ALIAS;
                }
            } else if ((secstate != r->secure) && !ns) {
                /*
                 * The register is not banked so we only want to allow
                 * migration of the non-secure instance.
                 */
                r2->type |= ARM_CP_ALIAS;
            }

            if (HOST_BIG_ENDIAN &&
                r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
        }
    }

    /*
     * By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (i.e. NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if (r2->type & ARM_CP_SPECIAL_MASK) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /*
     * Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
}

8540
8541
4b6a83fb
PM
8542void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
8543 const ARMCPRegInfo *r, void *opaque)
8544{
8545 /* Define implementations of coprocessor registers.
8546 * We store these in a hashtable because typically
8547 * there are fewer than 150 registers in a space which
8548 * is 16*16*16*8*8 = 262144 in size.
8549 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8550 * If a register is defined twice then the second definition is
8551 * used, so this can be used to define some generic registers and
8552 * then override them with implementation specific variations.
8553 * At least one of the original and the second definition should
8554 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8555 * against accidental use.
f5a0a5a5
PM
8556 *
8557 * The state field defines whether the register is to be
8558 * visible in the AArch32 or AArch64 execution state. If the
8559 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8560 * reginfo structure for the AArch32 view, which sees the lower
8561 * 32 bits of the 64 bit register.
8562 *
8563 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8564 * be wildcarded. AArch64 registers are always considered to be 64
8565 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8566 * the register, if any.
4b6a83fb 8567 */
d95101d6 8568 int crm, opc1, opc2;
4b6a83fb
PM
8569 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8570 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8571 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8572 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8573 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8574 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
d95101d6
RH
8575 CPState state;
8576
4b6a83fb
PM
8577 /* 64 bit registers have only CRm and Opc1 fields */
8578 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
f5a0a5a5
PM
8579 /* op0 only exists in the AArch64 encodings */
8580 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8581 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8582 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
cd8be50e
PM
8583 /*
8584 * This API is only for Arm's system coprocessors (14 and 15) or
8585 * (M-profile or v7A-and-earlier only) for implementation defined
8586 * coprocessors in the range 0..7. Our decode assumes this, since
8587 * 8..13 can be used for other insns including VFP and Neon. See
8588 * valid_cp() in translate.c. Assert here that we haven't tried
8589 * to use an invalid coprocessor number.
8590 */
8591 switch (r->state) {
8592 case ARM_CP_STATE_BOTH:
8593 /* 0 has a special meaning, but otherwise the same rules as AA32. */
8594 if (r->cp == 0) {
8595 break;
8596 }
8597 /* fall through */
8598 case ARM_CP_STATE_AA32:
8599 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
8600 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
8601 assert(r->cp >= 14 && r->cp <= 15);
8602 } else {
8603 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
8604 }
8605 break;
8606 case ARM_CP_STATE_AA64:
8607 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
8608 break;
8609 default:
8610 g_assert_not_reached();
8611 }
f5a0a5a5
PM
8612 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8613 * encodes a minimum access level for the register. We roll this
8614 * runtime check into our general permission check code, so check
8615 * here that the reginfo's specified permissions are strict enough
8616 * to encompass the generic architectural permission check.
8617 */
8618 if (r->state != ARM_CP_STATE_AA32) {
39107337 8619 CPAccessRights mask;
f5a0a5a5 8620 switch (r->opc1) {
b5bd7440
AB
8621 case 0:
8622 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8623 mask = PL0U_R | PL1_RW;
8624 break;
8625 case 1: case 2:
f5a0a5a5
PM
8626 /* min_EL EL1 */
8627 mask = PL1_RW;
8628 break;
8629 case 3:
8630 /* min_EL EL0 */
8631 mask = PL0_RW;
8632 break;
8633 case 4:
b4ecf60f 8634 case 5:
f5a0a5a5
PM
8635 /* min_EL EL2 */
8636 mask = PL2_RW;
8637 break;
f5a0a5a5
PM
8638 case 6:
8639 /* min_EL EL3 */
8640 mask = PL3_RW;
8641 break;
8642 case 7:
8643 /* min_EL EL1, secure mode only (we don't check the latter) */
8644 mask = PL1_RW;
8645 break;
8646 default:
8647 /* broken reginfo with out-of-range opc1 */
d385a605 8648 g_assert_not_reached();
f5a0a5a5
PM
8649 }
8650 /* assert our permissions are not too lax (stricter is fine) */
8651 assert((r->access & ~mask) == 0);
8652 }
8653
4b6a83fb
PM
8654 /* Check that the register definition has enough info to handle
8655 * reads and writes if they are permitted.
8656 */
87c3f0f2 8657 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
4b6a83fb 8658 if (r->access & PL3_R) {
3f3c82a5
FA
8659 assert((r->fieldoffset ||
8660 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8661 r->readfn);
4b6a83fb
PM
8662 }
8663 if (r->access & PL3_W) {
3f3c82a5
FA
8664 assert((r->fieldoffset ||
8665 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8666 r->writefn);
4b6a83fb
PM
8667 }
8668 }
5809ac57 8669
4b6a83fb
PM
8670 for (crm = crmmin; crm <= crmmax; crm++) {
8671 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8672 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
f5a0a5a5
PM
8673 for (state = ARM_CP_STATE_AA32;
8674 state <= ARM_CP_STATE_AA64; state++) {
8675 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
8676 continue;
8677 }
3f3c82a5
FA
8678 if (state == ARM_CP_STATE_AA32) {
8679 /* Under AArch32 CP registers can be common
8680 * (same for secure and non-secure world) or banked.
8681 */
9c513e78
AB
8682 char *name;
8683
3f3c82a5
FA
8684 switch (r->secure) {
8685 case ARM_CP_SECSTATE_S:
8686 case ARM_CP_SECSTATE_NS:
8687 add_cpreg_to_hashtable(cpu, r, opaque, state,
9c513e78
AB
8688 r->secure, crm, opc1, opc2,
8689 r->name);
3f3c82a5 8690 break;
cbe64585 8691 case ARM_CP_SECSTATE_BOTH:
9c513e78 8692 name = g_strdup_printf("%s_S", r->name);
3f3c82a5
FA
8693 add_cpreg_to_hashtable(cpu, r, opaque, state,
8694 ARM_CP_SECSTATE_S,
9c513e78
AB
8695 crm, opc1, opc2, name);
8696 g_free(name);
3f3c82a5
FA
8697 add_cpreg_to_hashtable(cpu, r, opaque, state,
8698 ARM_CP_SECSTATE_NS,
9c513e78 8699 crm, opc1, opc2, r->name);
3f3c82a5 8700 break;
cbe64585
RH
8701 default:
8702 g_assert_not_reached();
3f3c82a5
FA
8703 }
8704 } else {
8705 /* AArch64 registers get mapped to non-secure instance
8706 * of AArch32 */
8707 add_cpreg_to_hashtable(cpu, r, opaque, state,
8708 ARM_CP_SECSTATE_NS,
9c513e78 8709 crm, opc1, opc2, r->name);
3f3c82a5 8710 }
f5a0a5a5 8711 }
4b6a83fb
PM
8712 }
8713 }
8714 }
8715}
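/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical use of the API documented above. The register name and
 * encoding below are invented for the example; real definitions live
 * in the various *_cp_reginfo[] arrays.
 */
#if 0 /* example only */
static const ARMCPRegInfo example_reginfo = {
    .name = "EXAMPLE_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
};

static void example_define(ARMCPU *cpu)
{
    /* No wildcards here, so exactly one hashtable entry is created. */
    define_one_arm_cp_reg(cpu, &example_reginfo);
}
#endif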
8716
5809ac57
RH
8717/* Define a whole list of registers */
8718void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
8719 void *opaque, size_t len)
4b6a83fb 8720{
5809ac57
RH
8721 size_t i;
8722 for (i = 0; i < len; ++i) {
8723 define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
4b6a83fb
PM
8724 }
8725}
8726
6c5c0fec
AB
8727/*
8728 * Modify ARMCPRegInfo for access from userspace.
8729 *
8730 * This is a data driven modification directed by
8731 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST, since
8732 * user space cannot alter any values, and dynamic values pertaining to
8733 * execution state are hidden from the user-space view anyway.
8734 */
5809ac57
RH
8735void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
8736 const ARMCPRegUserSpaceInfo *mods,
8737 size_t mods_len)
6c5c0fec 8738{
5809ac57
RH
8739 for (size_t mi = 0; mi < mods_len; ++mi) {
8740 const ARMCPRegUserSpaceInfo *m = mods + mi;
d040242e 8741 GPatternSpec *pat = NULL;
5809ac57 8742
d040242e
AB
8743 if (m->is_glob) {
8744 pat = g_pattern_spec_new(m->name);
8745 }
5809ac57
RH
8746 for (size_t ri = 0; ri < regs_len; ++ri) {
8747 ARMCPRegInfo *r = regs + ri;
8748
d040242e
AB
8749 if (pat && g_pattern_match_string(pat, r->name)) {
8750 r->type = ARM_CP_CONST;
8751 r->access = PL0U_R;
8752 r->resetvalue = 0;
8753 /* continue */
8754 } else if (strcmp(r->name, m->name) == 0) {
6c5c0fec
AB
8755 r->type = ARM_CP_CONST;
8756 r->access = PL0U_R;
8757 r->resetvalue &= m->exported_bits;
8758 r->resetvalue |= m->fixed_bits;
8759 break;
8760 }
8761 }
d040242e
AB
8762 if (pat) {
8763 g_pattern_spec_free(pat);
8764 }
6c5c0fec
AB
8765 }
8766}
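/*
 * Illustrative sketch (not part of the original file) of the mods
 * array consumed above; the names and mask are hypothetical. QEMU's
 * real user-mode ID-register tables have the same shape.
 */
#if 0 /* example only */
static const ARMCPRegUserSpaceInfo example_user_mods[] = {
    /* Exact match: expose only the selected bits. */
    { .name = "ID_AA64PFR0_EL1", .exported_bits = 0x000000000000000fULL },
    /* Glob match: every other register in the family reads as zero. */
    { .name = "ID_AA64*", .is_glob = true },
};
/* modify_arm_cp_regs(some_reginfo_array, example_user_mods); */
#endif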
8767
60322b39 8768const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
4b6a83fb 8769{
5860362d 8770 return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
4b6a83fb
PM
8771}
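/*
 * Illustrative sketch (not part of the original file), assuming the
 * ENCODE_CP_REG() helper from cpu.h: how a caller can look up one
 * register, here the AArch32 non-secure DACR (cp15, crn=3, crm=0,
 * opc1=0, opc2=0, 32-bit).
 */
#if 0 /* example only */
static const ARMCPRegInfo *example_lookup_dacr(ARMCPU *cpu)
{
    uint32_t key = ENCODE_CP_REG(15, false, 1, 3, 0, 0, 0);
    return get_arm_cp_reginfo(cpu->cp_regs, key);
}
#endif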
8772
c4241c7d
PM
8773void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8774 uint64_t value)
4b6a83fb
PM
8775{
8776 /* Helper coprocessor write function for write-ignore registers */
4b6a83fb
PM
8777}
8778
c4241c7d 8779uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
4b6a83fb
PM
8780{
8781 /* Helper coprocessor read function for read-as-zero registers */
4b6a83fb
PM
8782 return 0;
8783}
8784
f5a0a5a5
PM
8785void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
8786{
8787 /* Helper coprocessor reset function for do-nothing-on-reset registers */
8788}
8789
af393ffc 8790static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
37064a8b
PM
8791{
8792 /* Return true if it is not valid for us to switch to
8793 * this CPU mode (i.e. all the UNPREDICTABLE cases in
8794 * the ARM ARM CPSRWriteByInstr pseudocode).
8795 */
af393ffc
PM
8796
8797 /* Changes to or from Hyp via MSR and CPS are illegal. */
8798 if (write_type == CPSRWriteByInstr &&
8799 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8800 mode == ARM_CPU_MODE_HYP)) {
8801 return 1;
8802 }
8803
37064a8b
PM
8804 switch (mode) {
8805 case ARM_CPU_MODE_USR:
10eacda7 8806 return 0;
37064a8b
PM
8807 case ARM_CPU_MODE_SYS:
8808 case ARM_CPU_MODE_SVC:
8809 case ARM_CPU_MODE_ABT:
8810 case ARM_CPU_MODE_UND:
8811 case ARM_CPU_MODE_IRQ:
8812 case ARM_CPU_MODE_FIQ:
52ff951b
PM
8813 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
8814 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8815 */
10eacda7
PM
8816 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8817 * and CPS are treated as illegal mode changes.
8818 */
8819 if (write_type == CPSRWriteByInstr &&
10eacda7 8820 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
7c208e0f 8821 (arm_hcr_el2_eff(env) & HCR_TGE)) {
10eacda7
PM
8822 return 1;
8823 }
37064a8b 8824 return 0;
e6c8fc07 8825 case ARM_CPU_MODE_HYP:
e6ef0169 8826 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
027fc527 8827 case ARM_CPU_MODE_MON:
58ae2d1f 8828 return arm_current_el(env) < 3;
37064a8b
PM
8829 default:
8830 return 1;
8831 }
8832}
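/*
 * Worked examples (not part of the original file) that follow directly
 * from the checks above:
 *   - MSR/CPS can never switch to or from Hyp mode (returns 1);
 *   - switching to Monitor mode fails unless we are already at EL3;
 *   - an undefined mode value such as 0x1e falls into the default
 *     case and is rejected.
 */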
8833
2f4a40e5
AZ
8834uint32_t cpsr_read(CPUARMState *env)
8835{
8836 int ZF;
6fbe23d5
PB
8837 ZF = (env->ZF == 0);
8838 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
2f4a40e5
AZ
8839 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
8840 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
8841 | ((env->condexec_bits & 0xfc) << 8)
af519934 8842 | (env->GE << 16) | (env->daif & CPSR_AIF);
2f4a40e5
AZ
8843}
8844
50866ba5
PM
8845void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
8846 CPSRWriteType write_type)
2f4a40e5 8847{
6e8801f9 8848 uint32_t changed_daif;
e784807c
PM
8849 bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
8850 (mask & (CPSR_M | CPSR_E | CPSR_IL));
6e8801f9 8851
2f4a40e5 8852 if (mask & CPSR_NZCV) {
6fbe23d5
PB
8853 env->ZF = (~val) & CPSR_Z;
8854 env->NF = val;
2f4a40e5
AZ
8855 env->CF = (val >> 29) & 1;
8856 env->VF = (val << 3) & 0x80000000;
8857 }
8858 if (mask & CPSR_Q)
8859 env->QF = ((val & CPSR_Q) != 0);
8860 if (mask & CPSR_T)
8861 env->thumb = ((val & CPSR_T) != 0);
8862 if (mask & CPSR_IT_0_1) {
8863 env->condexec_bits &= ~3;
8864 env->condexec_bits |= (val >> 25) & 3;
8865 }
8866 if (mask & CPSR_IT_2_7) {
8867 env->condexec_bits &= 3;
8868 env->condexec_bits |= (val >> 8) & 0xfc;
8869 }
8870 if (mask & CPSR_GE) {
8871 env->GE = (val >> 16) & 0xf;
8872 }
8873
6e8801f9
FA
8874 /* In a V7 implementation that includes the security extensions but does
8875 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
8876 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
8877 * bits respectively.
8878 *
8879 * In a V8 implementation, it is permitted for privileged software to
8880 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
8881 */
f8c88bbc 8882 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
6e8801f9
FA
8883 arm_feature(env, ARM_FEATURE_EL3) &&
8884 !arm_feature(env, ARM_FEATURE_EL2) &&
8885 !arm_is_secure(env)) {
8886
8887 changed_daif = (env->daif ^ val) & mask;
8888
8889 if (changed_daif & CPSR_A) {
8890 /* Check to see if we are allowed to change the masking of async
8891 * abort exceptions from a non-secure state.
8892 */
8893 if (!(env->cp15.scr_el3 & SCR_AW)) {
8894 qemu_log_mask(LOG_GUEST_ERROR,
8895 "Ignoring attempt to switch CPSR_A flag from "
8896 "non-secure world with SCR.AW bit clear\n");
8897 mask &= ~CPSR_A;
8898 }
8899 }
8900
8901 if (changed_daif & CPSR_F) {
8902 /* Check to see if we are allowed to change the masking of FIQ
8903 * exceptions from a non-secure state.
8904 */
8905 if (!(env->cp15.scr_el3 & SCR_FW)) {
8906 qemu_log_mask(LOG_GUEST_ERROR,
8907 "Ignoring attempt to switch CPSR_F flag from "
8908 "non-secure world with SCR.FW bit clear\n");
8909 mask &= ~CPSR_F;
8910 }
8911
8912 /* Check whether non-maskable FIQ (NMFI) support is enabled.
8913 * If this bit is set software is not allowed to mask
8914 * FIQs, but is allowed to set CPSR_F to 0.
8915 */
8916 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
8917 (val & CPSR_F)) {
8918 qemu_log_mask(LOG_GUEST_ERROR,
8919 "Ignoring attempt to enable CPSR_F flag "
8920 "(non-maskable FIQ [NMFI] support enabled)\n");
8921 mask &= ~CPSR_F;
8922 }
8923 }
8924 }
8925
4cc35614
PM
8926 env->daif &= ~(CPSR_AIF & mask);
8927 env->daif |= val & CPSR_AIF & mask;
8928
f8c88bbc
PM
8929 if (write_type != CPSRWriteRaw &&
8930 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
8c4f0eb9
PM
8931 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
8932 /* Note that we can only get here in USR mode if this is a
8933 * gdb stub write; for this case we follow the architectural
8934 * behaviour for guest writes in USR mode of ignoring an attempt
8935 * to switch mode. (Those are caught by translate.c for writes
8936 * triggered by guest instructions.)
8937 */
8938 mask &= ~CPSR_M;
8939 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
81907a58
PM
8940 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
8941 * v7, and has defined behaviour in v8:
8942 * + leave CPSR.M untouched
8943 * + allow changes to the other CPSR fields
8944 * + set PSTATE.IL
8945 * For user changes via the GDB stub, we don't set PSTATE.IL,
8946 * as this would be unnecessarily harsh for a user error.
37064a8b
PM
8947 */
8948 mask &= ~CPSR_M;
81907a58
PM
8949 if (write_type != CPSRWriteByGDBStub &&
8950 arm_feature(env, ARM_FEATURE_V8)) {
8951 mask |= CPSR_IL;
8952 val |= CPSR_IL;
8953 }
81e37284
PM
8954 qemu_log_mask(LOG_GUEST_ERROR,
8955 "Illegal AArch32 mode switch attempt from %s to %s\n",
8956 aarch32_mode_name(env->uncached_cpsr),
8957 aarch32_mode_name(val));
37064a8b 8958 } else {
81e37284
PM
8959 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
8960 write_type == CPSRWriteExceptionReturn ?
8961 "Exception return from AArch32" :
8962 "AArch32 mode switch from",
8963 aarch32_mode_name(env->uncached_cpsr),
8964 aarch32_mode_name(val), env->regs[15]);
37064a8b
PM
8965 switch_mode(env, val & CPSR_M);
8966 }
2f4a40e5
AZ
8967 }
8968 mask &= ~CACHED_CPSR_BITS;
8969 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
e784807c
PM
8970 if (rebuild_hflags) {
8971 arm_rebuild_hflags(env);
8972 }
2f4a40e5
AZ
8973}
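/*
 * Illustrative sketch (not part of the original file): the mask picks
 * which bits are written and write_type picks how strict we are.
 */
#if 0 /* example only */
static void example_cpsr_updates(CPUARMState *env, uint32_t val)
{
    /* Write only the NZCV flags; mode, IT bits and DAIF are untouched. */
    cpsr_write(env, val, CPSR_NZCV, CPSRWriteByInstr);
    /* Raw write: bypass the bad-mode checks and the hflags rebuild. */
    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
}
#endif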
8974
b26eefb6
PB
8975/* Sign/zero extend */
8976uint32_t HELPER(sxtb16)(uint32_t x)
8977{
8978 uint32_t res;
8979 res = (uint16_t)(int8_t)x;
8980 res |= (uint32_t)(int8_t)(x >> 16) << 16;
8981 return res;
8982}
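/*
 * Worked example (not part of the original file): each halfword's low
 * byte is sign-extended independently, so
 *     sxtb16(0x00ff0080) == 0xffffff80
 * (high half 0x00ff -> 0xffff, low half 0x0080 -> 0xff80). Compare the
 * zero-extending uxtb16() below, where the same input gives 0x00ff0080.
 */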
8983
e5346292
PM
8984static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
8985{
8986 /*
8987 * Take a division-by-zero exception if necessary; otherwise return
8988 * to get the usual non-trapping division behaviour (result of 0)
8989 */
8990 if (arm_feature(env, ARM_FEATURE_M)
8991 && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
8992 raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
8993 }
8994}
8995
b26eefb6
PB
8996uint32_t HELPER(uxtb16)(uint32_t x)
8997{
8998 uint32_t res;
8999 res = (uint16_t)(uint8_t)x;
9000 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
9001 return res;
9002}
9003
e5346292 9004int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
3670669c 9005{
fc7a5038 9006 if (den == 0) {
e5346292 9007 handle_possible_div0_trap(env, GETPC());
fc7a5038
PM
9008 return 0;
9009 }
9010 if (num == INT_MIN && den == -1) {
9011 return INT_MIN;
9012 }
3670669c
PB
9013 return num / den;
9014}
9015
e5346292 9016uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
3670669c 9017{
fc7a5038 9018 if (den == 0) {
e5346292 9019 handle_possible_div0_trap(env, GETPC());
fc7a5038
PM
9020 return 0;
9021 }
3670669c
PB
9022 return num / den;
9023}
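/*
 * Worked examples (not part of the original file) of the corner cases
 * handled above:
 *     sdiv(env, INT_MIN, -1) -> INT_MIN  (wraps rather than trapping)
 *     sdiv(env, 7, 0)        -> 0        (or a DIVBYZERO exception on
 *                                         M-profile with CCR.DIV_0_TRP set)
 *     udiv(env, 7, 0)        -> 0        (same trap rule)
 */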
9024
9025uint32_t HELPER(rbit)(uint32_t x)
9026{
42fedbca 9027 return revbit32(x);
3670669c
PB
9028}
9029
c47eaf9f 9030#ifdef CONFIG_USER_ONLY
b5ff1b31 9031
affdb64d 9032static void switch_mode(CPUARMState *env, int mode)
b5ff1b31 9033{
2fc0cc0e 9034 ARMCPU *cpu = env_archcpu(env);
a47dddd7
AF
9035
9036 if (mode != ARM_CPU_MODE_USR) {
9037 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
9038 }
b5ff1b31
FB
9039}
9040
012a906b
GB
9041uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9042 uint32_t cur_el, bool secure)
9e729b57
EI
9043{
9044 return 1;
9045}
9046
ce02049d
GB
9047void aarch64_sync_64_to_32(CPUARMState *env)
9048{
9049 g_assert_not_reached();
9050}
9051
b5ff1b31
FB
9052#else
9053
affdb64d 9054static void switch_mode(CPUARMState *env, int mode)
b5ff1b31
FB
9055{
9056 int old_mode;
9057 int i;
9058
9059 old_mode = env->uncached_cpsr & CPSR_M;
9060 if (mode == old_mode)
9061 return;
9062
9063 if (old_mode == ARM_CPU_MODE_FIQ) {
9064 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 9065 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
9066 } else if (mode == ARM_CPU_MODE_FIQ) {
9067 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 9068 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
9069 }
9070
f5206413 9071 i = bank_number(old_mode);
b5ff1b31 9072 env->banked_r13[i] = env->regs[13];
b5ff1b31
FB
9073 env->banked_spsr[i] = env->spsr;
9074
f5206413 9075 i = bank_number(mode);
b5ff1b31 9076 env->regs[13] = env->banked_r13[i];
b5ff1b31 9077 env->spsr = env->banked_spsr[i];
593cfa2b
PM
9078
9079 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
9080 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
b5ff1b31
FB
9081}
9082
0eeb17d6
GB
9083/* Physical Interrupt Target EL Lookup Table
9084 *
9085 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
9086 *
9087 * The below multi-dimensional table is used for looking up the target
9088 * exception level given numerous condition criteria. Specifically, the
9089 * target EL is based on SCR and HCR routing controls as well as the
9090 * currently executing EL and secure state.
9091 *
9092 * Dimensions:
9093 * target_el_table[2][2][2][2][2][4]
9094 * | | | | | +--- Current EL
9095 * | | | | +------ Non-secure(0)/Secure(1)
9096 * | | | +--------- HCR mask override
9097 * | | +------------ SCR exec state control
9098 * | +--------------- SCR mask override
9099 * +------------------ 32-bit(0)/64-bit(1) EL3
9100 *
9101 * The table values are as such:
9102 * 0-3 = EL0-EL3
9103 * -1 = Cannot occur
9104 *
9105 * The ARM ARM target EL table includes entries indicating that an "exception
9106 * is not taken". The two cases where this is applicable are:
9107 * 1) An exception is taken from EL3 but the SCR does not have the exception
9108 * routed to EL3.
9109 * 2) An exception is taken from EL2 but the HCR does not have the exception
9110 * routed to EL2.
9111 * In these two cases, the below table contains a target of EL1. This value is
9112 * returned as it is expected that the consumer of the table data will check
9113 * for "target EL >= current EL" to ensure the exception is not taken.
9114 *
9115 * SCR HCR
9116 * 64 EA AMO From
9117 * BIT IRQ IMO Non-secure Secure
9118 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
9119 */
82c39f6a 9120static const int8_t target_el_table[2][2][2][2][2][4] = {
0eeb17d6
GB
9121 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9122 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
9123 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
9124 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
9125 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9126 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
9127 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
9128 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
9129 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
6c85f906
RDC
9130 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
9131 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
9132 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
0eeb17d6
GB
9133 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
9134 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
6c85f906
RDC
9135 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
9136 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
0eeb17d6
GB
9137};
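/*
 * Worked lookup (not part of the original file): with an AArch64 EL3
 * (is64=1), SCR_EL3.IRQ=0, SCR_EL3.RW=1 and HCR_EL2.IMO=1, an IRQ taken
 * from non-secure EL0 reads target_el_table[1][0][1][1][0][0] == 2, so
 * the interrupt is routed to EL2. With IMO=0 (and TGE clear, see below)
 * the same lookup uses the [1][0][1][0] block and yields EL1.
 */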
9138
9139/*
9140 * Determine the target EL for physical exceptions
9141 */
012a906b
GB
9142uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9143 uint32_t cur_el, bool secure)
0eeb17d6
GB
9144{
9145 CPUARMState *env = cs->env_ptr;
f7778444
RH
9146 bool rw;
9147 bool scr;
9148 bool hcr;
0eeb17d6 9149 int target_el;
2cde031f 9150 /* Is the highest EL AArch64? */
f7778444
RH
9151 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
9152 uint64_t hcr_el2;
2cde031f
SS
9153
9154 if (arm_feature(env, ARM_FEATURE_EL3)) {
9155 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
9156 } else {
9157 /* Either EL2 is the highest EL (and so the EL2 register width
9158 * is given by is64); or there is no EL2 or EL3, in which case
9159 * the value of 'rw' does not affect the table lookup anyway.
9160 */
9161 rw = is64;
9162 }
0eeb17d6 9163
f7778444 9164 hcr_el2 = arm_hcr_el2_eff(env);
0eeb17d6
GB
9165 switch (excp_idx) {
9166 case EXCP_IRQ:
9167 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
f7778444 9168 hcr = hcr_el2 & HCR_IMO;
0eeb17d6
GB
9169 break;
9170 case EXCP_FIQ:
9171 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
f7778444 9172 hcr = hcr_el2 & HCR_FMO;
0eeb17d6
GB
9173 break;
9174 default:
9175 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
f7778444 9176 hcr = hcr_el2 & HCR_AMO;
0eeb17d6
GB
9177 break;
9178 };
9179
d1b31428
RH
9180 /*
9181 * For these purposes, TGE and AMO/IMO/FMO both force the
9182 * interrupt to EL2. Fold TGE into the bit extracted above.
9183 */
9184 hcr |= (hcr_el2 & HCR_TGE) != 0;
9185
0eeb17d6
GB
9186 /* Perform a table-lookup for the target EL given the current state */
9187 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
9188
9189 assert(target_el > 0);
9190
9191 return target_el;
9192}
9193
fc6177af 9194void arm_log_exception(CPUState *cs)
b59f479b 9195{
fc6177af
PM
9196 int idx = cs->exception_index;
9197
b59f479b
PMD
9198 if (qemu_loglevel_mask(CPU_LOG_INT)) {
9199 const char *exc = NULL;
9200 static const char * const excnames[] = {
9201 [EXCP_UDEF] = "Undefined Instruction",
9202 [EXCP_SWI] = "SVC",
9203 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
9204 [EXCP_DATA_ABORT] = "Data Abort",
9205 [EXCP_IRQ] = "IRQ",
9206 [EXCP_FIQ] = "FIQ",
9207 [EXCP_BKPT] = "Breakpoint",
9208 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
9209 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
9210 [EXCP_HVC] = "Hypervisor Call",
9211 [EXCP_HYP_TRAP] = "Hypervisor Trap",
9212 [EXCP_SMC] = "Secure Monitor Call",
9213 [EXCP_VIRQ] = "Virtual IRQ",
9214 [EXCP_VFIQ] = "Virtual FIQ",
9215 [EXCP_SEMIHOST] = "Semihosting call",
9216 [EXCP_NOCP] = "v7M NOCP UsageFault",
9217 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
9218 [EXCP_STKOF] = "v8M STKOF UsageFault",
9219 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
9220 [EXCP_LSERR] = "v8M LSERR UsageFault",
9221 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
e5346292 9222 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
3c29632f 9223 [EXCP_VSERR] = "Virtual SERR",
b59f479b
PMD
9224 };
9225
9226 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
9227 exc = excnames[idx];
9228 }
9229 if (!exc) {
9230 exc = "unknown";
9231 }
fc6177af
PM
9232 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
9233 idx, exc, cs->cpu_index);
b59f479b
PMD
9234 }
9235}
9236
a356dacf 9237/*
7aab5a8c
PMD
9238 * Function used to synchronize QEMU's AArch64 register set with AArch32
9239 * register set. This is necessary when switching between AArch32 and AArch64
9240 * execution state.
a356dacf 9241 */
7aab5a8c 9242void aarch64_sync_32_to_64(CPUARMState *env)
9ee6e8bb 9243{
7aab5a8c
PMD
9244 int i;
9245 uint32_t mode = env->uncached_cpsr & CPSR_M;
9246
9247 /* We can blanket copy R[0:7] to X[0:7] */
9248 for (i = 0; i < 8; i++) {
9249 env->xregs[i] = env->regs[i];
fd592d89 9250 }
70d74660 9251
9a223097 9252 /*
7aab5a8c
PMD
9253 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9254 * Otherwise, they come from the banked user regs.
fd592d89 9255 */
7aab5a8c
PMD
9256 if (mode == ARM_CPU_MODE_FIQ) {
9257 for (i = 8; i < 13; i++) {
9258 env->xregs[i] = env->usr_regs[i - 8];
9259 }
9260 } else {
9261 for (i = 8; i < 13; i++) {
9262 env->xregs[i] = env->regs[i];
9263 }
fd592d89 9264 }
9ee6e8bb 9265
7aab5a8c
PMD
9266 /*
9267 * Registers x13-x23 are the various mode SP and LR registers. Registers
9268 * r13 and r14 are only copied if we are in that mode, otherwise we copy
9269 * from the mode banked register.
9270 */
9271 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9272 env->xregs[13] = env->regs[13];
9273 env->xregs[14] = env->regs[14];
9274 } else {
9275 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9276 /* HYP is an exception in that it is copied from r14 */
9277 if (mode == ARM_CPU_MODE_HYP) {
9278 env->xregs[14] = env->regs[14];
95695eff 9279 } else {
7aab5a8c 9280 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
95695eff 9281 }
95695eff
PM
9282 }
9283
7aab5a8c
PMD
9284 if (mode == ARM_CPU_MODE_HYP) {
9285 env->xregs[15] = env->regs[13];
9286 } else {
9287 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
95695eff
PM
9288 }
9289
7aab5a8c
PMD
9290 if (mode == ARM_CPU_MODE_IRQ) {
9291 env->xregs[16] = env->regs[14];
9292 env->xregs[17] = env->regs[13];
9293 } else {
9294 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9295 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9296 }
95695eff 9297
7aab5a8c
PMD
9298 if (mode == ARM_CPU_MODE_SVC) {
9299 env->xregs[18] = env->regs[14];
9300 env->xregs[19] = env->regs[13];
9301 } else {
9302 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9303 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9304 }
95695eff 9305
7aab5a8c
PMD
9306 if (mode == ARM_CPU_MODE_ABT) {
9307 env->xregs[20] = env->regs[14];
9308 env->xregs[21] = env->regs[13];
9309 } else {
9310 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9311 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9312 }
e33cf0f8 9313
7aab5a8c
PMD
9314 if (mode == ARM_CPU_MODE_UND) {
9315 env->xregs[22] = env->regs[14];
9316 env->xregs[23] = env->regs[13];
9317 } else {
9318 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9319 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
e33cf0f8
PM
9320 }
9321
9322 /*
7aab5a8c
PMD
9323 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
9324 * mode, then we can copy from r8-r14. Otherwise, we copy from the
9325 * FIQ bank for r8-r14.
e33cf0f8 9326 */
7aab5a8c
PMD
9327 if (mode == ARM_CPU_MODE_FIQ) {
9328 for (i = 24; i < 31; i++) {
9329 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
9330 }
9331 } else {
9332 for (i = 24; i < 29; i++) {
9333 env->xregs[i] = env->fiq_regs[i - 24];
e33cf0f8 9334 }
7aab5a8c
PMD
9335 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9336 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
e33cf0f8 9337 }
7aab5a8c
PMD
9338
9339 env->pc = env->regs[15];
e33cf0f8
PM
9340}
9341
9a223097 9342/*
7aab5a8c
PMD
9343 * Function used to synchronize QEMU's AArch32 register set with AArch64
9344 * register set. This is necessary when switching between AArch32 and AArch64
9345 * execution state.
de2db7ec 9346 */
7aab5a8c 9347void aarch64_sync_64_to_32(CPUARMState *env)
9ee6e8bb 9348{
7aab5a8c
PMD
9349 int i;
9350 uint32_t mode = env->uncached_cpsr & CPSR_M;
abc24d86 9351
7aab5a8c
PMD
9352 /* We can blanket copy X[0:7] to R[0:7] */
9353 for (i = 0; i < 8; i++) {
9354 env->regs[i] = env->xregs[i];
de2db7ec 9355 }
3f0cddee 9356
9a223097 9357 /*
7aab5a8c
PMD
9358 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
9359 * Otherwise, we copy x8-x12 into the banked user regs.
de2db7ec 9360 */
7aab5a8c
PMD
9361 if (mode == ARM_CPU_MODE_FIQ) {
9362 for (i = 8; i < 13; i++) {
9363 env->usr_regs[i - 8] = env->xregs[i];
9364 }
9365 } else {
9366 for (i = 8; i < 13; i++) {
9367 env->regs[i] = env->xregs[i];
9368 }
fb602cb7
PM
9369 }
9370
9a223097 9371 /*
7aab5a8c
PMD
9372 * Registers r13 & r14 depend on the current mode.
9373 * If we are in a given mode, we copy the corresponding x registers to r13
9374 * and r14. Otherwise, we copy the x register to the banked r13 and r14
9375 * for the mode.
fb602cb7 9376 */
7aab5a8c
PMD
9377 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9378 env->regs[13] = env->xregs[13];
9379 env->regs[14] = env->xregs[14];
fb602cb7 9380 } else {
7aab5a8c 9381 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
fb602cb7 9382
7aab5a8c
PMD
9383 /*
9384 * HYP is an exception in that it does not have its own banked r14 but
9385 * shares the USR r14
9386 */
9387 if (mode == ARM_CPU_MODE_HYP) {
9388 env->regs[14] = env->xregs[14];
9389 } else {
9390 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9391 }
9392 }
fb602cb7 9393
7aab5a8c
PMD
9394 if (mode == ARM_CPU_MODE_HYP) {
9395 env->regs[13] = env->xregs[15];
fb602cb7 9396 } else {
7aab5a8c 9397 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
fb602cb7 9398 }
d02a8698 9399
7aab5a8c
PMD
9400 if (mode == ARM_CPU_MODE_IRQ) {
9401 env->regs[14] = env->xregs[16];
9402 env->regs[13] = env->xregs[17];
d02a8698 9403 } else {
7aab5a8c
PMD
9404 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9405 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
d02a8698
PM
9406 }
9407
7aab5a8c
PMD
9408 if (mode == ARM_CPU_MODE_SVC) {
9409 env->regs[14] = env->xregs[18];
9410 env->regs[13] = env->xregs[19];
9411 } else {
9412 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9413 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
fb602cb7
PM
9414 }
9415
7aab5a8c
PMD
9416 if (mode == ARM_CPU_MODE_ABT) {
9417 env->regs[14] = env->xregs[20];
9418 env->regs[13] = env->xregs[21];
9419 } else {
9420 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9421 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
ce02049d
GB
9422 }
9423
9424 if (mode == ARM_CPU_MODE_UND) {
3a9148d0
SS
9425 env->regs[14] = env->xregs[22];
9426 env->regs[13] = env->xregs[23];
ce02049d 9427 } else {
593cfa2b 9428 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
3a9148d0 9429 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
ce02049d
GB
9430 }
9431
9432 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
9433 * mode, then we can copy to r8-r14. Otherwise, we copy to the
9434 * FIQ bank for r8-r14.
9435 */
9436 if (mode == ARM_CPU_MODE_FIQ) {
9437 for (i = 24; i < 31; i++) {
9438 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
9439 }
9440 } else {
9441 for (i = 24; i < 29; i++) {
9442 env->fiq_regs[i - 24] = env->xregs[i];
9443 }
9444 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
593cfa2b 9445 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
ce02049d
GB
9446 }
9447
9448 env->regs[15] = env->pc;
9449}
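/*
 * Summary (not part of the original file) of the fixed mapping that the
 * two sync functions above implement, matching aarch64_regnum() below:
 *     x0-x7   <-> r0-r7            x16/x17 <-> lr_irq/sp_irq
 *     x8-x12  <-> user r8-r12      x18/x19 <-> lr_svc/sp_svc
 *     x13/x14 <-> sp_usr/lr_usr    x20/x21 <-> lr_abt/sp_abt
 *     x15     <-> sp_hyp           x22/x23 <-> lr_und/sp_und
 *     x24-x28 <-> r8_fiq-r12_fiq   x29/x30 <-> sp_fiq/lr_fiq
 */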
9450
dea8378b
PM
9451static void take_aarch32_exception(CPUARMState *env, int new_mode,
9452 uint32_t mask, uint32_t offset,
9453 uint32_t newpc)
9454{
4a2696c0
RH
9455 int new_el;
9456
dea8378b
PM
9457 /* Change the CPU state so as to actually take the exception. */
9458 switch_mode(env, new_mode);
4a2696c0 9459
dea8378b
PM
9460 /*
9461 * For exceptions taken to AArch32 we must clear the SS bit in both
9462 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9463 */
f944a854 9464 env->pstate &= ~PSTATE_SS;
dea8378b
PM
9465 env->spsr = cpsr_read(env);
9466 /* Clear IT bits. */
9467 env->condexec_bits = 0;
9468 /* Switch to the new mode, and to the correct instruction set. */
9469 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
88828bf1
CD
9470
9471 /* This must be after mode switching. */
9472 new_el = arm_current_el(env);
9473
dea8378b
PM
9474 /* Set new mode endianness */
9475 env->uncached_cpsr &= ~CPSR_E;
4a2696c0 9476 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
dea8378b
PM
9477 env->uncached_cpsr |= CPSR_E;
9478 }
829f9fd3
PM
9479 /* J and IL must always be cleared for exception entry */
9480 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
dea8378b
PM
9481 env->daif |= mask;
9482
f2f68a78
RC
9483 if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
9484 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
9485 env->uncached_cpsr |= CPSR_SSBS;
9486 } else {
9487 env->uncached_cpsr &= ~CPSR_SSBS;
9488 }
9489 }
9490
dea8378b
PM
9491 if (new_mode == ARM_CPU_MODE_HYP) {
9492 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9493 env->elr_el[2] = env->regs[15];
9494 } else {
4a2696c0 9495 /* CPSR.PAN is normally preserved unless... */
f8af1143 9496 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
4a2696c0
RH
9497 switch (new_el) {
9498 case 3:
9499 if (!arm_is_secure_below_el3(env)) {
9500 /* ... the target is EL3, from non-secure state. */
9501 env->uncached_cpsr &= ~CPSR_PAN;
9502 break;
9503 }
9504 /* ... the target is EL3, from secure state ... */
9505 /* fall through */
9506 case 1:
9507 /* ... the target is EL1 and SCTLR.SPAN is 0. */
9508 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
9509 env->uncached_cpsr |= CPSR_PAN;
9510 }
9511 break;
9512 }
9513 }
dea8378b
PM
9514 /*
9515 * this is a lie, as there was no c1_sys on V4T/V5, but who cares
9516 * and we should just guard the thumb mode on V4
9517 */
9518 if (arm_feature(env, ARM_FEATURE_V4T)) {
9519 env->thumb =
9520 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9521 }
9522 env->regs[14] = env->regs[15] + offset;
9523 }
9524 env->regs[15] = newpc;
a8a79c7a 9525 arm_rebuild_hflags(env);
dea8378b
PM
9526}
9527
b9bc21ff
PM
9528static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9529{
9530 /*
9531 * Handle exception entry to Hyp mode; this is sufficiently
9532 * different to entry to other AArch32 modes that we handle it
9533 * separately here.
9534 *
9535 * The vector table entry used is always the 0x14 Hyp mode entry point,
2c023d36 9536 * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
b9bc21ff
PM
9537 * The offset applied to the preferred return address is always zero
9538 * (see DDI0487C.a section G1.12.3).
9539 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9540 */
9541 uint32_t addr, mask;
9542 ARMCPU *cpu = ARM_CPU(cs);
9543 CPUARMState *env = &cpu->env;
9544
9545 switch (cs->exception_index) {
9546 case EXCP_UDEF:
9547 addr = 0x04;
9548 break;
9549 case EXCP_SWI:
2c023d36 9550 addr = 0x08;
b9bc21ff
PM
9551 break;
9552 case EXCP_BKPT:
9553 /* Fall through to prefetch abort. */
9554 case EXCP_PREFETCH_ABORT:
9555 env->cp15.ifar_s = env->exception.vaddress;
9556 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
9557 (uint32_t)env->exception.vaddress);
9558 addr = 0x0c;
9559 break;
9560 case EXCP_DATA_ABORT:
9561 env->cp15.dfar_s = env->exception.vaddress;
9562 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
9563 (uint32_t)env->exception.vaddress);
9564 addr = 0x10;
9565 break;
9566 case EXCP_IRQ:
9567 addr = 0x18;
9568 break;
9569 case EXCP_FIQ:
9570 addr = 0x1c;
9571 break;
9572 case EXCP_HVC:
9573 addr = 0x08;
9574 break;
9575 case EXCP_HYP_TRAP:
9576 addr = 0x14;
9bbb4ef9 9577 break;
b9bc21ff
PM
9578 default:
9579 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9580 }
9581
9582 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
2ed08180
PM
9583 if (!arm_feature(env, ARM_FEATURE_V8)) {
9584 /*
9585 * QEMU syndrome values are v8-style. v7 has the IL bit
9586 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9587 * If this is a v7 CPU, squash the IL bit in those cases.
9588 */
9589 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9590 (cs->exception_index == EXCP_DATA_ABORT &&
9591 !(env->exception.syndrome & ARM_EL_ISV)) ||
9592 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9593 env->exception.syndrome &= ~ARM_EL_IL;
9594 }
9595 }
b9bc21ff
PM
9596 env->cp15.esr_el[2] = env->exception.syndrome;
9597 }
9598
9599 if (arm_current_el(env) != 2 && addr < 0x14) {
9600 addr = 0x14;
9601 }
9602
9603 mask = 0;
9604 if (!(env->cp15.scr_el3 & SCR_EA)) {
9605 mask |= CPSR_A;
9606 }
9607 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9608 mask |= CPSR_I;
9609 }
9610 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
9611 mask |= CPSR_F;
9612 }
9613
9614 addr += env->cp15.hvbar;
9615
9616 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
9617}
9618
966f758c 9619static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
b5ff1b31 9620{
97a8ea5a
AF
9621 ARMCPU *cpu = ARM_CPU(cs);
9622 CPUARMState *env = &cpu->env;
b5ff1b31
FB
9623 uint32_t addr;
9624 uint32_t mask;
9625 int new_mode;
9626 uint32_t offset;
16a906fd 9627 uint32_t moe;
b5ff1b31 9628
16a906fd 9629 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
64b91e3f 9630 switch (syn_get_ec(env->exception.syndrome)) {
16a906fd
PM
9631 case EC_BREAKPOINT:
9632 case EC_BREAKPOINT_SAME_EL:
9633 moe = 1;
9634 break;
9635 case EC_WATCHPOINT:
9636 case EC_WATCHPOINT_SAME_EL:
9637 moe = 10;
9638 break;
9639 case EC_AA32_BKPT:
9640 moe = 3;
9641 break;
9642 case EC_VECTORCATCH:
9643 moe = 5;
9644 break;
9645 default:
9646 moe = 0;
9647 break;
9648 }
9649
9650 if (moe) {
9651 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
9652 }
9653
b9bc21ff
PM
9654 if (env->exception.target_el == 2) {
9655 arm_cpu_do_interrupt_aarch32_hyp(cs);
9656 return;
9657 }
9658
27103424 9659 switch (cs->exception_index) {
b5ff1b31
FB
9660 case EXCP_UDEF:
9661 new_mode = ARM_CPU_MODE_UND;
9662 addr = 0x04;
9663 mask = CPSR_I;
9664 if (env->thumb)
9665 offset = 2;
9666 else
9667 offset = 4;
9668 break;
9669 case EXCP_SWI:
9670 new_mode = ARM_CPU_MODE_SVC;
9671 addr = 0x08;
9672 mask = CPSR_I;
601d70b9 9673 /* The PC already points to the next instruction. */
b5ff1b31
FB
9674 offset = 0;
9675 break;
06c949e6 9676 case EXCP_BKPT:
9ee6e8bb
PB
9677 /* Fall through to prefetch abort. */
9678 case EXCP_PREFETCH_ABORT:
88ca1c2d 9679 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
b848ce2b 9680 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
3f1beaca 9681 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
88ca1c2d 9682 env->exception.fsr, (uint32_t)env->exception.vaddress);
b5ff1b31
FB
9683 new_mode = ARM_CPU_MODE_ABT;
9684 addr = 0x0c;
9685 mask = CPSR_A | CPSR_I;
9686 offset = 4;
9687 break;
9688 case EXCP_DATA_ABORT:
4a7e2d73 9689 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
b848ce2b 9690 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
3f1beaca 9691 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
4a7e2d73 9692 env->exception.fsr,
6cd8a264 9693 (uint32_t)env->exception.vaddress);
b5ff1b31
FB
9694 new_mode = ARM_CPU_MODE_ABT;
9695 addr = 0x10;
9696 mask = CPSR_A | CPSR_I;
9697 offset = 8;
9698 break;
9699 case EXCP_IRQ:
9700 new_mode = ARM_CPU_MODE_IRQ;
9701 addr = 0x18;
9702 /* Disable IRQ and imprecise data aborts. */
9703 mask = CPSR_A | CPSR_I;
9704 offset = 4;
de38d23b
FA
9705 if (env->cp15.scr_el3 & SCR_IRQ) {
9706 /* IRQ routed to monitor mode */
9707 new_mode = ARM_CPU_MODE_MON;
9708 mask |= CPSR_F;
9709 }
b5ff1b31
FB
9710 break;
9711 case EXCP_FIQ:
9712 new_mode = ARM_CPU_MODE_FIQ;
9713 addr = 0x1c;
9714 /* Disable FIQ, IRQ and imprecise data aborts. */
9715 mask = CPSR_A | CPSR_I | CPSR_F;
de38d23b
FA
9716 if (env->cp15.scr_el3 & SCR_FIQ) {
9717 /* FIQ routed to monitor mode */
9718 new_mode = ARM_CPU_MODE_MON;
9719 }
b5ff1b31
FB
9720 offset = 4;
9721 break;
87a4b270
PM
9722 case EXCP_VIRQ:
9723 new_mode = ARM_CPU_MODE_IRQ;
9724 addr = 0x18;
9725 /* Disable IRQ and imprecise data aborts. */
9726 mask = CPSR_A | CPSR_I;
9727 offset = 4;
9728 break;
9729 case EXCP_VFIQ:
9730 new_mode = ARM_CPU_MODE_FIQ;
9731 addr = 0x1c;
9732 /* Disable FIQ, IRQ and imprecise data aborts. */
9733 mask = CPSR_A | CPSR_I | CPSR_F;
9734 offset = 4;
9735 break;
3c29632f
RH
9736 case EXCP_VSERR:
9737 {
9738 /*
9739 * Note that this is reported as a data abort, but the DFAR
9740 * has an UNKNOWN value. Construct the SError syndrome from
9741 * AET and ExT fields.
9742 */
9743 ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
9744
9745 if (extended_addresses_enabled(env)) {
9746 env->exception.fsr = arm_fi_to_lfsc(&fi);
9747 } else {
9748 env->exception.fsr = arm_fi_to_sfsc(&fi);
9749 }
9750 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
9751 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9752 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
9753 env->exception.fsr);
9754
9755 new_mode = ARM_CPU_MODE_ABT;
9756 addr = 0x10;
9757 mask = CPSR_A | CPSR_I;
9758 offset = 8;
9759 }
9760 break;
dbe9d163
FA
9761 case EXCP_SMC:
9762 new_mode = ARM_CPU_MODE_MON;
9763 addr = 0x08;
9764 mask = CPSR_A | CPSR_I | CPSR_F;
9765 offset = 0;
9766 break;
b5ff1b31 9767 default:
a47dddd7 9768 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
b5ff1b31
FB
9769 return; /* Never happens. Keep compiler happy. */
9770 }
e89e51a1
FA
9771
9772 if (new_mode == ARM_CPU_MODE_MON) {
9773 addr += env->cp15.mvbar;
137feaa9 9774 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
e89e51a1 9775 /* High vectors. When enabled, base address cannot be remapped. */
b5ff1b31 9776 addr += 0xffff0000;
8641136c
NR
9777 } else {
9778 /* ARM v7 architectures provide a vector base address register to remap
9779 * the interrupt vector table.
e89e51a1 9780 * This register is only consulted in non-monitor mode, and is banked.
8641136c
NR
9781 * Note: only bits 31:5 are valid.
9782 */
fb6c91ba 9783 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
b5ff1b31 9784 }
dbe9d163
FA
9785
9786 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9787 env->cp15.scr_el3 &= ~SCR_NS;
9788 }
9789
dea8378b 9790 take_aarch32_exception(env, new_mode, mask, offset, addr);
b5ff1b31
FB
9791}
9792
a65dabf7
PM
9793static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
9794{
9795 /*
9796 * Return the register number of the AArch64 view of the AArch32
9797 * register @aarch32_reg. The CPUARMState CPSR is assumed to still
9798 * be that of the AArch32 mode the exception came from.
9799 */
9800 int mode = env->uncached_cpsr & CPSR_M;
9801
9802 switch (aarch32_reg) {
9803 case 0 ... 7:
9804 return aarch32_reg;
9805 case 8 ... 12:
9806 return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
9807 case 13:
9808 switch (mode) {
9809 case ARM_CPU_MODE_USR:
9810 case ARM_CPU_MODE_SYS:
9811 return 13;
9812 case ARM_CPU_MODE_HYP:
9813 return 15;
9814 case ARM_CPU_MODE_IRQ:
9815 return 17;
9816 case ARM_CPU_MODE_SVC:
9817 return 19;
9818 case ARM_CPU_MODE_ABT:
9819 return 21;
9820 case ARM_CPU_MODE_UND:
9821 return 23;
9822 case ARM_CPU_MODE_FIQ:
9823 return 29;
9824 default:
9825 g_assert_not_reached();
9826 }
9827 case 14:
9828 switch (mode) {
9829 case ARM_CPU_MODE_USR:
9830 case ARM_CPU_MODE_SYS:
9831 case ARM_CPU_MODE_HYP:
9832 return 14;
9833 case ARM_CPU_MODE_IRQ:
9834 return 16;
9835 case ARM_CPU_MODE_SVC:
9836 return 18;
9837 case ARM_CPU_MODE_ABT:
9838 return 20;
9839 case ARM_CPU_MODE_UND:
9840 return 22;
9841 case ARM_CPU_MODE_FIQ:
9842 return 30;
9843 default:
9844 g_assert_not_reached();
9845 }
9846 case 15:
9847 return 31;
9848 default:
9849 g_assert_not_reached();
9850 }
9851}
9852
f944a854
RC
9853static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
9854{
9855 uint32_t ret = cpsr_read(env);
9856
9857 /* Move DIT to the correct location for SPSR_ELx */
9858 if (ret & CPSR_DIT) {
9859 ret &= ~CPSR_DIT;
9860 ret |= PSTATE_DIT;
9861 }
9862 /* Merge PSTATE.SS into SPSR_ELx */
9863 ret |= env->pstate & PSTATE_SS;
9864
9865 return ret;
9866}
9867
7ac61020
PM
9868static bool syndrome_is_sync_extabt(uint32_t syndrome)
9869{
9870 /* Return true if this syndrome value is a synchronous external abort */
9871 switch (syn_get_ec(syndrome)) {
9872 case EC_INSNABORT:
9873 case EC_INSNABORT_SAME_EL:
9874 case EC_DATAABORT:
9875 case EC_DATAABORT_SAME_EL:
9876 /* Look at fault status code for all the synchronous ext abort cases */
9877 switch (syndrome & 0x3f) {
9878 case 0x10:
9879 case 0x13:
9880 case 0x14:
9881 case 0x15:
9882 case 0x16:
9883 case 0x17:
9884 return true;
9885 default:
9886 return false;
9887 }
9888 default:
9889 return false;
9890 }
9891}
9892
966f758c
PM
9893/* Handle exception entry to a target EL which is using AArch64 */
9894static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
f3a9b694
PM
9895{
9896 ARMCPU *cpu = ARM_CPU(cs);
9897 CPUARMState *env = &cpu->env;
9898 unsigned int new_el = env->exception.target_el;
9899 target_ulong addr = env->cp15.vbar_el[new_el];
9900 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
4a2696c0 9901 unsigned int old_mode;
0ab5953b 9902 unsigned int cur_el = arm_current_el(env);
a65dabf7 9903 int rt;
0ab5953b 9904
9a05f7b6
RH
9905 /*
9906 * Note that new_el can never be 0. If cur_el is 0, then
9907 * el0_a64 is is_a64(), else el0_a64 is ignored.
9908 */
9909 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
f3a9b694 9910
0ab5953b 9911 if (cur_el < new_el) {
3d6f7617
PM
9912 /* Entry vector offset depends on whether the implemented EL
9913 * immediately lower than the target level is using AArch32 or AArch64
9914 */
9915 bool is_aa64;
cb092fbb 9916 uint64_t hcr;
3d6f7617
PM
9917
9918 switch (new_el) {
9919 case 3:
9920 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
9921 break;
9922 case 2:
cb092fbb
RH
9923 hcr = arm_hcr_el2_eff(env);
9924 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9925 is_aa64 = (hcr & HCR_RW) != 0;
9926 break;
9927 }
9928 /* fall through */
3d6f7617
PM
9929 case 1:
9930 is_aa64 = is_a64(env);
9931 break;
9932 default:
9933 g_assert_not_reached();
9934 }
9935
9936 if (is_aa64) {
f3a9b694
PM
9937 addr += 0x400;
9938 } else {
9939 addr += 0x600;
9940 }
9941 } else if (pstate_read(env) & PSTATE_SP) {
9942 addr += 0x200;
9943 }
9944
f3a9b694
PM
9945 switch (cs->exception_index) {
9946 case EXCP_PREFETCH_ABORT:
9947 case EXCP_DATA_ABORT:
7ac61020
PM
9948 /*
9949 * FEAT_DoubleFault allows synchronous external aborts taken to EL3
9950 * to be taken to the SError vector entrypoint.
9951 */
9952 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
9953 syndrome_is_sync_extabt(env->exception.syndrome)) {
9954 addr += 0x180;
9955 }
f3a9b694
PM
9956 env->cp15.far_el[new_el] = env->exception.vaddress;
9957 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
9958 env->cp15.far_el[new_el]);
9959 /* fall through */
9960 case EXCP_BKPT:
9961 case EXCP_UDEF:
9962 case EXCP_SWI:
9963 case EXCP_HVC:
9964 case EXCP_HYP_TRAP:
9965 case EXCP_SMC:
a65dabf7
PM
9966 switch (syn_get_ec(env->exception.syndrome)) {
9967 case EC_ADVSIMDFPACCESSTRAP:
4be42f40
PM
9968 /*
9969 * QEMU internal FP/SIMD syndromes from AArch32 include the
9970 * TA and coproc fields which are only exposed if the exception
9971 * is taken to AArch32 Hyp mode. Mask them out to get a valid
9972 * AArch64 format syndrome.
9973 */
9974 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
a65dabf7
PM
9975 break;
9976 case EC_CP14RTTRAP:
9977 case EC_CP15RTTRAP:
9978 case EC_CP14DTTRAP:
9979 /*
9980 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
9981 * the raw register field from the insn; when taking this to
9982 * AArch64 we must convert it to the AArch64 view of the register
9983 * number. Notice that we read a 4-bit AArch32 register number and
9984 * write back a 5-bit AArch64 one.
9985 */
9986 rt = extract32(env->exception.syndrome, 5, 4);
9987 rt = aarch64_regnum(env, rt);
9988 env->exception.syndrome = deposit32(env->exception.syndrome,
9989 5, 5, rt);
9990 break;
9991 case EC_CP15RRTTRAP:
9992 case EC_CP14RRTTRAP:
9993 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
9994 rt = extract32(env->exception.syndrome, 5, 4);
9995 rt = aarch64_regnum(env, rt);
9996 env->exception.syndrome = deposit32(env->exception.syndrome,
9997 5, 5, rt);
9998 rt = extract32(env->exception.syndrome, 10, 4);
9999 rt = aarch64_regnum(env, rt);
10000 env->exception.syndrome = deposit32(env->exception.syndrome,
10001 10, 5, rt);
10002 break;
4be42f40 10003 }
f3a9b694
PM
10004 env->cp15.esr_el[new_el] = env->exception.syndrome;
10005 break;
10006 case EXCP_IRQ:
10007 case EXCP_VIRQ:
10008 addr += 0x80;
10009 break;
10010 case EXCP_FIQ:
10011 case EXCP_VFIQ:
10012 addr += 0x100;
10013 break;
3c29632f
RH
10014 case EXCP_VSERR:
10015 addr += 0x180;
10016 /* Construct the SError syndrome from IDS and ISS fields. */
10017 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
10018 env->cp15.esr_el[new_el] = env->exception.syndrome;
10019 break;
f3a9b694
PM
10020 default:
10021 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
10022 }
10023
10024 if (is_a64(env)) {
4a2696c0 10025 old_mode = pstate_read(env);
f3a9b694
PM
10026 aarch64_save_sp(env, arm_current_el(env));
10027 env->elr_el[new_el] = env->pc;
10028 } else {
f944a854 10029 old_mode = cpsr_read_for_spsr_elx(env);
f3a9b694
PM
10030 env->elr_el[new_el] = env->regs[15];
10031
10032 aarch64_sync_32_to_64(env);
10033
10034 env->condexec_bits = 0;
10035 }
4a2696c0
RH
10036 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
10037
f3a9b694
PM
10038 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
10039 env->elr_el[new_el]);
10040
4a2696c0
RH
10041 if (cpu_isar_feature(aa64_pan, cpu)) {
10042 /* The value of PSTATE.PAN is normally preserved, except when ... */
10043 new_mode |= old_mode & PSTATE_PAN;
10044 switch (new_el) {
10045 case 2:
10046 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */
10047 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
10048 != (HCR_E2H | HCR_TGE)) {
10049 break;
10050 }
10051 /* fall through */
10052 case 1:
10053 /* ... the target is EL1 ... */
10054 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */
10055 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
10056 new_mode |= PSTATE_PAN;
10057 }
10058 break;
10059 }
10060 }
34669338
RH
10061 if (cpu_isar_feature(aa64_mte, cpu)) {
10062 new_mode |= PSTATE_TCO;
10063 }
4a2696c0 10064
f2f68a78
RC
10065 if (cpu_isar_feature(aa64_ssbs, cpu)) {
10066 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
10067 new_mode |= PSTATE_SSBS;
10068 } else {
10069 new_mode &= ~PSTATE_SSBS;
10070 }
10071 }
10072
f3a9b694 10073 pstate_write(env, PSTATE_DAIF | new_mode);
53221552 10074 env->aarch64 = true;
f3a9b694 10075 aarch64_restore_sp(env, new_el);
a8a79c7a 10076 helper_rebuild_hflags_a64(env, new_el);
f3a9b694
PM
10077
10078 env->pc = addr;
10079
10080 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
10081 new_el, env->pc, pstate_read(env));
966f758c
PM
10082}
10083
ed6e6ba9
AB
10084/*
10085 * Do semihosting call and set the appropriate return value. All the
10086 * permission and validity checks have been done at translate time.
10087 *
10088 * We only see semihosting exceptions in TCG, as they are not
10089 * trapped to the hypervisor in KVM.
10090 */
91f78c58 10091#ifdef CONFIG_TCG
ed6e6ba9
AB
10092static void handle_semihosting(CPUState *cs)
10093{
904c04de
PM
10094 ARMCPU *cpu = ARM_CPU(cs);
10095 CPUARMState *env = &cpu->env;
10096
10097 if (is_a64(env)) {
ed6e6ba9
AB
10098 qemu_log_mask(CPU_LOG_INT,
10099 "...handling as semihosting call 0x%" PRIx64 "\n",
10100 env->xregs[0]);
ed3a06b1 10101 do_common_semihosting(cs);
4ff5ef9e 10102 env->pc += 4;
904c04de 10103 } else {
904c04de
PM
10104 qemu_log_mask(CPU_LOG_INT,
10105 "...handling as semihosting call 0x%x\n",
10106 env->regs[0]);
ed3a06b1 10107 do_common_semihosting(cs);
4ff5ef9e 10108 env->regs[15] += env->thumb ? 2 : 4;
904c04de
PM
10109 }
10110}
ed6e6ba9 10111#endif
904c04de 10112
966f758c
PM
10113/* Handle a CPU exception for A and R profile CPUs.
10114 * Do any appropriate logging, handle PSCI calls, and then hand off
10115 * to the AArch64-entry or AArch32-entry function depending on the
10116 * target exception level's register width.
853bfef4
CF
10117 *
10118 * Note: this is used both by TCG (as the do_interrupt tcg op)
10119 * and by KVM, to re-inject guest debug exceptions and to
10120 * inject a Synchronous-External-Abort.
966f758c
PM
10121 */
10122void arm_cpu_do_interrupt(CPUState *cs)
10123{
10124 ARMCPU *cpu = ARM_CPU(cs);
10125 CPUARMState *env = &cpu->env;
10126 unsigned int new_el = env->exception.target_el;
10127
531c60a9 10128 assert(!arm_feature(env, ARM_FEATURE_M));
966f758c 10129
fc6177af 10130 arm_log_exception(cs);
966f758c
PM
10131 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
10132 new_el);
10133 if (qemu_loglevel_mask(CPU_LOG_INT)
10134 && !excp_is_internal(cs->exception_index)) {
6568da45 10135 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
64b91e3f 10136 syn_get_ec(env->exception.syndrome),
966f758c
PM
10137 env->exception.syndrome);
10138 }
10139
10140 if (arm_is_psci_call(cpu, cs->exception_index)) {
10141 arm_handle_psci_call(cpu);
10142 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
10143 return;
10144 }
10145
ed6e6ba9
AB
10146 /*
10147 * Semihosting semantics depend on the register width of the code
10148 * that caused the exception, not the target exception level, so
10149 * must be handled here.
966f758c 10150 */
ed6e6ba9
AB
10151#ifdef CONFIG_TCG
10152 if (cs->exception_index == EXCP_SEMIHOST) {
10153 handle_semihosting(cs);
904c04de
PM
10154 return;
10155 }
ed6e6ba9 10156#endif
904c04de 10157
b5c53d1b
AL
10158 /* Hooks may change global state, so the BQL should be held; the
10159 * BQL also needs to be held for any modification of
10160 * cs->interrupt_request.
10161 */
10162 g_assert(qemu_mutex_iothread_locked());
10163
10164 arm_call_pre_el_change_hook(cpu);
10165
904c04de
PM
10166 assert(!excp_is_internal(cs->exception_index));
10167 if (arm_el_is_aa64(env, new_el)) {
966f758c
PM
10168 arm_cpu_do_interrupt_aarch64(cs);
10169 } else {
10170 arm_cpu_do_interrupt_aarch32(cs);
10171 }
f3a9b694 10172
bd7d00fc
PM
10173 arm_call_el_change_hook(cpu);
10174
f3a9b694
PM
10175 if (!kvm_enabled()) {
10176 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
10177 }
10178}
c47eaf9f 10179#endif /* !CONFIG_USER_ONLY */
0480f69a 10180
aaec1432
RH
10181uint64_t arm_sctlr(CPUARMState *env, int el)
10182{
10183 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
10184 if (el == 0) {
10185 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
b6ad6062
RDC
10186 el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
10187 ? 2 : 1;
aaec1432
RH
10188 }
10189 return env->cp15.sctlr_el[el];
10190}
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits. */
        return extract32(tcr, 20, 1) * 3;
    }
}

int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits. */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits. */
        return extract32(tcr, 30, 1) * 3;
    }
}
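
/*
 * For example, for a two-range regime the pair returned above is
 * {TBI0, TBI1} == tcr[38:37], and the caller picks the relevant bit
 * with (tbi >> select) & 1.  For a one-range regime such as TCR_EL3,
 * tcr[20] set yields 0b11, so either value of the select bit sees
 * TBI enabled.
 */
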
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    bool epd, hpd, using16k, using64k, tsz_oob, ds;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
    ARMCPU *cpu = env_archcpu(env);

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
        sh = extract32(tcr, 12, 2);
        ps = extract32(tcr, 16, 3);
        ds = extract64(tcr, 32, 1);
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
        }
        ps = extract64(tcr, 32, 3);
        ds = extract64(tcr, 59, 1);
    }

    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - using64k;
    } else {
        max_tsz = 39;
    }

    /*
     * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
     * adjust the effective value of DS, as documented.
     */
    min_tsz = 16;
    if (using64k) {
        if (cpu_isar_feature(aa64_lva, cpu)) {
            min_tsz = 12;
        }
        ds = false;
    } else if (ds) {
        switch (mmu_idx) {
        case ARMMMUIdx_Stage2:
        case ARMMMUIdx_Stage2_S:
            if (using16k) {
                ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
            }
            break;
        default:
            if (using16k) {
                ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
            } else {
                ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
            }
            break;
        }
        if (ds) {
            min_tsz = 12;
        }
    }

    if (tsz > max_tsz) {
        tsz = max_tsz;
        tsz_oob = true;
    } else if (tsz < min_tsz) {
        tsz = min_tsz;
        tsz_oob = true;
    } else {
        tsz_oob = false;
    }

    /* Present TBI as a composite with TBID. */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .ps = ps,
        .sh = sh,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
        .tsz_oob = tsz_oob,
        .ds = ds,
    };
}
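
/*
 * For example, a typical Linux TCR_EL1 with 4k pages and 48-bit VAs
 * has T0SZ == T1SZ == 16, so tsz == 16 and each region spans
 * 2^(64 - 16) bytes; a kernel address has va[55] == 1 and selects
 * the TTBR1 fields, while a user address selects the TTBR0 fields.
 */
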
/*
 * Note that signed overflow is undefined in C.  The following routines
 * are careful to use unsigned types where modulo arithmetic is
 * required.  Failure to do so _will_ break on newer gcc.
 */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
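
/*
 * Overflow has occurred iff the result's sign differs from operand a's
 * while the operands had the same sign (for addition) or differing
 * signs (for subtraction).  For example:
 *   add16_sat(0x7fff, 0x0001) == 0x7fff (positive overflow clamps up)
 *   add16_sat(0x8000, 0xffff) == 0x8000 (negative overflow clamps down)
 */
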
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

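/*
 * op_addsub.h acts as a template: each inclusion stamps out one family
 * of packed-lane helpers from the ADD16/SUB16/ADD8/SUB8 macros (for
 * PFX q, the qadd16/qsub16/qadd8/qsub8 family and their exchange
 * variants) and then #undefs the macros so the next family below can
 * redefine them.
 */
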
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
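
/*
 * For the GE-setting (ARITH_GE) families: for example, uadd16 on the
 * low halfwords 0xffff + 0x0001 produces a carry out, (sum >> 16) == 1,
 * so both GE bits for that lane are set; usub16 sets the GE bits when
 * the lane subtraction does not borrow, (sum >> 16) == 0.
 */
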

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
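
/*
 * For example, with the GE bits 0b0011 (set by a previous uadd16 on
 * the low halfword), mask == 0x0000ffff and SEL returns the low
 * halfword of a combined with the high halfword of b.
 */
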

/*
 * CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
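
/*
 * The XORs undo the one's-complement conventions of the library
 * implementations: the ARM CRC32/CRC32C instructions define no pre-
 * or post-inversion, so acc must round-trip through these helpers as
 * the raw polynomial accumulator.
 */
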

/*
 * Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    uint64_t hcr_el2;

    /*
     * CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible.
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    /*
     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     *   0, 2 : trap EL0 and EL1/PL1 accesses
     *   1    : trap only EL0 accesses
     *   3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);

        switch (fpen) {
        case 1:
            if (cur_el != 0) {
                break;
            }
            /* fall through */
        case 0:
        case 2:
            /* Trap from Secure PL0 or PL1 to Secure PL1. */
            if (!arm_el_is_aa64(env, 3)
                && (cur_el == 3 || arm_is_secure_below_el3(env))) {
                return 3;
            }
            if (cur_el <= 1) {
                return 1;
            }
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /*
     * CPTR_EL2 is present in v7VE or v8, and changes format
     * with HCR_EL2.E2H (regardless of TGE).
     */
    if (cur_el <= 2) {
        if (hcr_el2 & HCR_E2H) {
            switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
            case 1:
                if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
                    break;
                }
                /* fall through */
            case 0:
            case 2:
                return 2;
            }
        } else if (arm_is_el2_enabled(env)) {
            if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
                return 2;
            }
        }
    }

    /* CPTR_EL3 : present in v8 */
    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
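
/*
 * For example, in the common non-secure case with CPACR_EL1.FPEN == 1
 * (trap only EL0), an FP instruction at EL0 yields 1 (trap to EL1)
 * while the same instruction at EL1 yields 0, assuming E2H+TGE is not
 * set and neither CPTR_EL2 nor CPTR_EL3 traps apply.
 */
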

/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE20_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}
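
/*
 * The A-profile mapping is the inverse of arm_mmu_idx_el below: e.g.
 * ARMMMUIdx_E20_2_PAN maps back to EL2.  M-profile indexes encode the
 * privilege level directly in the ARM_MMU_IDX_M_PRIV bit.
 */
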
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    ARMMMUIdx idx;
    uint64_t hcr;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            idx = ARMMMUIdx_E20_0;
        } else {
            idx = ARMMMUIdx_E10_0;
        }
        break;
    case 1:
        if (env->pstate & PSTATE_PAN) {
            idx = ARMMMUIdx_E10_1_PAN;
        } else {
            idx = ARMMMUIdx_E10_1;
        }
        break;
    case 2:
        /* Note that TGE does not apply at EL2.  */
        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            if (env->pstate & PSTATE_PAN) {
                idx = ARMMMUIdx_E20_2_PAN;
            } else {
                idx = ARMMMUIdx_E20_2;
            }
        } else {
            idx = ARMMMUIdx_E2;
        }
        break;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }

    if (arm_is_secure_below_el3(env)) {
        idx &= ~ARM_MMU_IDX_A_NS;
    }

    return idx;
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}
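
/*
 * For example, at EL0 with HCR_EL2.{E2H,TGE} == {1,1} this returns
 * ARMMMUIdx_E20_0 (the EL2&0 regime); if the CPU is also Secure below
 * EL3, clearing ARM_MMU_IDX_A_NS turns that into ARMMMUIdx_SE20_0.
 */
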

static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }
    return flags;
}

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1.  */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    /*
     * The SME exception we are testing for is raised via
     * AArch64.CheckFPAdvSIMDEnabled(), as called from
     * AArch32.CheckAdvSIMDOrFPEnabled().
     */
    if (el == 0
        && FIELD_EX64(env->svcr, SVCR, SM)
        && (!arm_is_el2_enabled(env)
            || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
        && arm_el_is_aa64(env, 1)
        && !sme_fa64(env, el)) {
        DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);

        /*
         * If either FP or SVE are disabled, translator does not need len.
         * If SVE EL > FP EL, FP exception has precedence, and translator
         * does not need SVE EL.  Save potential re-translations by forcing
         * the unneeded data to zero.
         */
        if (fp_el != 0) {
            if (sve_el > fp_el) {
                sve_el = 0;
            }
        } else if (sve_el == 0) {
            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
        int sme_el = sme_exception_el(env, el);
        bool sm = FIELD_EX64(env->svcr, SVCR, SM);

        DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
        if (sme_el == 0) {
            /* Similarly, do not compute SVL if SME is disabled.  */
            int svl = sve_vqm1_for_el_sm(env, el, true);
            DP_TBFLAG_A64(flags, SVL, svl);
            if (sm) {
                /* If SVE is disabled, we will not have set VL above.  */
                DP_TBFLAG_A64(flags, VL, svl);
            }
        }
        if (sm) {
            DP_TBFLAG_A64(flags, PSTATE_SM, 1);
            DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
        }
        DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required.  */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI.  */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
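
/*
 * Any code that changes state consumed by the rebuild_hflags_*
 * functions above (system register writes, security or EL changes)
 * without going through one of the helpers below must call
 * arm_rebuild_hflags() before the next TB is looked up;
 * assert_hflags_rebuild_correctly() further down catches missed
 * updates when QEMU is built with CONFIG_DEBUG_TCG.
 */
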

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, so we must recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, so we must recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}

static bool mve_no_pred(CPUARMState *env)
{
    /*
     * Return true if there is definitely no predication of MVE
     * instructions by VPR or LTPSIZE.  (Returning false even if there
     * isn't any predication is OK; generated code will just be
     * a little worse.)
     * If the CPU does not implement MVE then this TB flag is always 0.
     *
     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
     * logic in gen_update_fp_context() needs to be updated to match.
     *
     * We do not include the effect of the ECI bits here -- they are
     * tracked in other TB flags.  This simplifies the logic for
     * "when did we emit code that changes the MVE_NO_PRED TB flag
     * and thus need to end the TB?".
     */
    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
        return false;
    }
    if (env->v7m.vpr) {
        return false;
    }
    if (env->v7m.ltpsize < 4) {
        return false;
    }
    return true;
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUARMTBFlags flags;

    assert_hflags_rebuild_correctly(env);
    flags = env->hflags;

    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            DP_TBFLAG_A64(flags, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                DP_TBFLAG_M32(flags, LSPACT, 1);
            }

            if (mve_no_pred(env)) {
                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                DP_TBFLAG_A32(flags, VFPEN, 1);
            }
        }

        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
     */
    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
    }

    *pflags = flags.flags;
    *cs_base = flags.flags2;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
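
/*
 * Each vq is 128 vector bits, i.e. 16 predicate bits.  For example,
 * narrowing to vq == 1 gives pmask == ~(-1ULL << 16) == 0xffff: the
 * first loop iteration keeps only the low 16 bits of p[0] in each of
 * the 17 registers (16 predicates plus the FFR), and the later words
 * are cleared entirely because pmask is zeroed after the first pass.
 */
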

static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
{
    int exc_el;

    if (sm) {
        exc_el = sme_exception_el(env, el);
    } else {
        exc_el = sve_exception_el(env, el);
    }
    if (exc_el) {
        return 0; /* disabled */
    }
    return sve_vqm1_for_el_sm(env, el, sm);
}

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64, sm;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;

    /*
     * Both AArch64.TakeException and AArch64.ExceptionReturn
     * invoke ResetSVEState when taking an exception from, or
     * returning to, AArch32 state when PSTATE.SM is enabled.
     */
    sm = FIELD_EX64(env->svcr, SVCR, SM);
    if (old_a64 != new_a64 && sm) {
        arm_reset_sve_state(env);
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_len = new_len = 0;
    if (old_a64) {
        old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
    }
    if (new_a64) {
        new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
    }

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif