/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);
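/*
 * "Raw" accessors bypass any read/write hooks a register definition may
 * have: they access the backing CPUARMState field directly, which is what
 * the migration/KVM sync code further down needs.
 */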
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
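/*
 * The two hash-table walkers below make a pair: count_cpreg() sizes the
 * cpreg index/value arrays, and add_cpreg_to_list() then fills in the
 * indexes, so init_cpreg_list() can allocate the arrays exactly once.
 */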
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
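/*
 * In the ARMCPRegInfo tables below, the .cp/.opc1/.crn/.crm/.opc2 fields
 * mirror the fields of the AArch32 MRC/MCR (or AArch64 MRS/MSR) encoding;
 * e.g. FCSEIDR below matches MRC p15, 0, <Rt>, c13, c0, 0. CP_ANY
 * wildcards a field so one entry can cover a whole range of encodings.
 */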
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};
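/*
 * Each pm_event describes one architectural PMU event: whether this CPU
 * implements it (used to build PMCEID[01]), how to read the underlying
 * free-running count, and how to turn a residual count into a nanosecond
 * deadline for predicting counter overflow.
 */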
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In system
 * emulation this is derived from QEMU_CLOCK_VIRTUAL; in usermode we fall
 * back to the host tick count.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}
static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}
static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
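/*
 * In the filter checks below, p/u/nsk/nsu/nsh/m correspond to the
 * PMEVTYPER/PMCCFILTR fields P[31], U[30], NSK[29], NSU[28], NSH[27]
 * and M[26] (see the PMXEVTYPER_* masks).
 */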
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
                     !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
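/*
 * The counters are kept as deltas: while a counter is "stopped" (between an
 * op_start and the matching op_finish) c15_ccnt / c14_pmevcntr hold the
 * guest-visible value, and while it is "counting" the corresponding delta
 * field holds (underlying count - guest-visible value), so a later op_start
 * can recover the guest value as (underlying - delta).
 */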
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                                  overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                                  overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
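/*
 * pmu_op_start()/pmu_op_finish() bracket any operation that can change the
 * PMU configuration (register writes, EL changes): they sync the cycle
 * counter and every implemented event counter to their guest-visible
 * values and back again.
 */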
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
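/*
 * PMCR.C clears PMCCNTR and PMCR.P clears the event counters; because the
 * write below is bracketed by pmu_op_start()/pmu_op_finish(), the clears
 * operate on the guest-visible counter values.
 */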
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
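/*
 * pmu_counter_mask() is bit 31 (PMCCNTR) plus one bit per implemented
 * event counter, so the set/clear writes below ignore bits for counters
 * that don't exist on this CPU.
 */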
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
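/*
 * For the PMEVTYPER<n>/PMEVCNTR<n> register banks the counter index n is
 * encoded in the instruction as crm[1:0]:opc2[2:0], which is what the
 * ((crm & 3) << 3) | (opc2 & 7) expressions below extract.
 */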
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
c4241c7d 1630static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
200ac0ef
PM
1631 uint64_t value)
1632{
6ecd0b6b
AB
1633 if (arm_feature(env, ARM_FEATURE_V8)) {
1634 env->cp15.c9_pmuserenr = value & 0xf;
1635 } else {
1636 env->cp15.c9_pmuserenr = value & 1;
1637 }
200ac0ef
PM
1638}
1639
c4241c7d
PM
1640static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1641 uint64_t value)
200ac0ef
PM
1642{
1643 /* We have no event counters so only the C bit can be changed */
7ece99b1 1644 value &= pmu_counter_mask(env);
200ac0ef 1645 env->cp15.c9_pminten |= value;
f4efb4b2 1646 pmu_update_irq(env);
200ac0ef
PM
1647}
1648
c4241c7d
PM
1649static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1650 uint64_t value)
200ac0ef 1651{
7ece99b1 1652 value &= pmu_counter_mask(env);
200ac0ef 1653 env->cp15.c9_pminten &= ~value;
f4efb4b2 1654 pmu_update_irq(env);
200ac0ef
PM
1655}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    /*
     * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
     * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
     * Instead, choose the format based on the mode of EL3.
     */
    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* RES1 */
        valid_mask &= ~SCR_NET;     /* RES0 */

        if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu)) {
            value |= SCR_RW;        /* RAO/WI */
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= SCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_doublefault, cpu)) {
            valid_mask |= SCR_EASE | SCR_NMEA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
        if (cpu_isar_feature(aa32_ras, cpu)) {
            valid_mask |= SCR_TERR;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}

static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    if (hcr_el2 & HCR_AMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
            ret |= CPSR_A;
        }
    }

    return ret;
}
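
/*
 * Note that the CPSR_A/CPSR_I/CPSR_F masks used above (bits 8, 7 and 6)
 * line up with the A, I and F bits of ISR_EL1, so the pending interrupt
 * state can be reported directly using the CPSR bit definitions.
 */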

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
     * at all, so we don't need to check whether we're v8A.
     */
    if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
        (env->cp15.hstr_el2 & HSTR_TTEE)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return teecr_access(env, ri, isread);
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write, .accessfn = teecr_access },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
};

PM
2181#ifndef CONFIG_USER_ONLY
2182
3f208fd7
PM
2183static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2184 bool isread)
00108f2d 2185{
75502672
PM
2186 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2187 * Writable only at the highest implemented exception level.
2188 */
2189 int el = arm_current_el(env);
5bc84371
RH
2190 uint64_t hcr;
2191 uint32_t cntkctl;
75502672
PM
2192
2193 switch (el) {
2194 case 0:
5bc84371
RH
2195 hcr = arm_hcr_el2_eff(env);
2196 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2197 cntkctl = env->cp15.cnthctl_el2;
2198 } else {
2199 cntkctl = env->cp15.c14_cntkctl;
2200 }
2201 if (!extract32(cntkctl, 0, 2)) {
75502672
PM
2202 return CP_ACCESS_TRAP;
2203 }
2204 break;
2205 case 1:
2206 if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2207 arm_is_secure_below_el3(env)) {
2208 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2209 return CP_ACCESS_TRAP_UNCATEGORIZED;
2210 }
2211 break;
2212 case 2:
2213 case 3:
2214 break;
00108f2d 2215 }
75502672
PM
2216
2217 if (!isread && el < arm_highest_el(env)) {
2218 return CP_ACCESS_TRAP_UNCATEGORIZED;
2219 }
2220
00108f2d
PM
2221 return CP_ACCESS_OK;
2222}
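
/*
 * For example, with EL3 implemented, a CNTFRQ write from EL1 reaches the
 * final check above with el < arm_highest_el(env) and so UNDEFs
 * (CP_ACCESS_TRAP_UNCATEGORIZED): below the highest implemented exception
 * level only reads of CNTFRQ can succeed.
 */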

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (has_el2 && timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */

    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
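
/*
 * Worked example (assuming the default GTIMER_SCALE of 16ns per tick, i.e.
 * a 62.5MHz counter, as used for the CNTFRQ reset value below): 1ms of
 * QEMU_CLOCK_VIRTUAL time reads back as 1000000 / 16 = 62500 counter ticks.
 */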

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
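
/*
 * Worked example of the recalculation above: with CNTVOFF_EL2 zero, an
 * enabled timer (CNTx_CTL.ENABLE, bit 0) whose CVAL is still in the future
 * has ISTATUS clear and its QEMUTimer deadline set to the CVAL tick; once
 * the count reaches CVAL, ISTATUS (bit 2) becomes 1 and the timer output is
 * asserted unless CNTx_CTL.IMASK (bit 1) is set.
 */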

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}
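
/*
 * In other words, the virtual counter reads as the raw count (offset zero)
 * wherever the architecture says CNTVOFF_EL2 does not apply: at EL2 with
 * HCR_EL2.E2H set, and at EL0 when HCR_EL2.<E2H,TGE> == '11'. Everywhere
 * else the virtual count is the physical count minus CNTVOFF_EL2.
 */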

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
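
/*
 * TVAL semantics as implemented above: a read returns the 32-bit truncation
 * of CVAL - (count - offset); a write sets
 * CVAL = (count - offset) + sign_extend(value). Worked example: writing 100
 * to CNTP_TVAL arms the timer 100 ticks from now, and an immediate
 * read-back returns approximately 100, counting down as time advances.
 */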

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}
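
/*
 * The two helpers above implement the VHE register redirection: when the
 * CPU is running in an EL2&0 translation regime (one of the *E20* mmu
 * indexes, i.e. HCR_EL2.E2H is set), accesses to the EL0 physical timer
 * registers are redirected to the EL2 timer and the virtual timer
 * registers to the EL2 virtual timer; otherwise the usual EL1 physical and
 * virtual timers are accessed.
 */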

static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
};

static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

#else

/* In user-mode most of the generic timer registers are inaccessible,
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
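
/*
 * Worked example for the masks above: a write of 0xffffffff to PAR on
 * a v7 non-LPAE CPU is stored as 0xfffff6ff (bits 11 and 8 cleared);
 * on a pre-v7 CPU it is stored as 0xfffff1ff (bits 11, 10 and 9
 * cleared). The masked-out bits are not writable in the
 * short-descriptor PAR format.
 */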

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
                }
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}

#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    /*
     * ATS operations only do S1 or S1+S2 translations, so we never
     * have to deal with the ARMCacheAttrs format for S2 only.
     */
    assert(!cacheattrs.is_s2_format);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
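
/*
 * Summary of the 64-bit PAR value assembled above. On success: bit 0
 * (F) clear, SH in bits [8:7], NS in bit 9, LPAE in bit 11 (always
 * set here), PA in bits [47:12] and ATTR in bits [63:56]. On failure:
 * F set, FS in bits [6:1], PTW in bit 8 and S in bit 9.
 */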

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 &&
        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
#endif
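
/*
 * Example decode: "AT S1E1R, <Xt>" encodes opc1 = 0, crn = 7, crm = 8,
 * opc2 = 0, so ats_write64() takes the (ri->opc2 & 6) == 0,
 * ri->opc1 == 0 path: a stage 1 EL1 read translation whose result
 * lands in PAR_EL1.
 */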

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
};

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
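
/*
 * Worked example: extended value 0x12 encodes AP = 2 for region 0
 * (bits [1:0]) and AP = 1 for region 1 (bits [5:4]);
 * simple_mpu_ap_bits() packs this to 0x6, with region 0 in bits [1:0]
 * and region 1 in bits [3:2]. extended_mpu_ap_bits() is the exact
 * inverse mapping.
 */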

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
};

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /*
             * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format
             */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /*
             * In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    raw_write(env, ri, value);
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
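
/*
 * Example: for a 64-bit write to TTBR0_EL1 the ASID lives in bits
 * [63:48], so changing only the table base address in the low bits
 * does not flush the TLB, while changing the ASID field does.
 */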

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
     * Flush if that might be changing. Note we're not checking
     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
     * holds the active ASID, only checking the field that might.
     */
    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
        (arm_hcr_el2_eff(env) & HCR_E2H)) {
        uint16_t mask = ARMMMUIdxBit_E20_2 |
                        ARMMMUIdxBit_E20_2_PAN |
                        ARMMMUIdxBit_E20_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(env_cpu(env), mask);
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * A change in the VMID for the stage 2 page table invalidates
     * the combined stage 1&2 TLBs (E10_1 and E10_0).
     */
    if (raw_read(env, ri) != value) {
        uint16_t mask = ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0;

        if (arm_is_secure_below_el3(env)) {
            mask >>= ARM_MMU_IDX_A_NS;
        }

        tlb_flush_by_mmuidx(cs, mask);
        raw_write(env, ri, value);
    }
}

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .writefn = vmsa_tcr_el12_write,
      .raw_writefn = raw_write,
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .accessfn = access_tvm_trvm,
    .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = {
        offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
        offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
    },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);

    if (arm_is_el2_enabled(env) && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
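
/*
 * Example MPIDR composition: a v7MP uniprocessor such as Cortex-R5
 * with mp_affinity 0 reads back as 0xc0000000, i.e. bit 31 (MP
 * extensions implemented) plus bit 30 (uniprocessor).
 */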

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_PAN;
}

static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
}

static const ARMCPRegInfo pan_reginfo = {
    .name = "PAN", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_pan_read, .writefn = aa64_pan_write
};

static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_UAO;
}

static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
}

static const ARMCPRegInfo uao_reginfo = {
    .name = "UAO", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
    .type = ARM_CP_NO_RAW, .access = PL1_RW,
    .readfn = aa64_uao_read, .writefn = aa64_uao_write
};

static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_DIT;
}

static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
}

static const ARMCPRegInfo dit_reginfo = {
    .name = "DIT", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_dit_read, .writefn = aa64_dit_write
};

static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SSBS;
}

static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
}

static const ARMCPRegInfo ssbs_reginfo = {
    .name = "SSBS", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
    .type = ARM_CP_NO_RAW, .access = PL0_RW,
    .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
};
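
/*
 * Usage note: each of PAN, UAO, DIT and SSBS above exposes a single
 * PSTATE bit through MSR/MRS (e.g. "mrs x0, pan" goes through
 * aa64_pan_read()). They are ARM_CP_NO_RAW because the underlying
 * state lives in env->pstate, which is migrated separately.
 */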

static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Coherency or Persistence... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
                                              const ARMCPRegInfo *ri,
                                              bool isread)
{
    /* Cache invalidate/clean to Point of Unification... */
    switch (arm_current_el(env)) {
    case 0:
        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
            return CP_ACCESS_TRAP;
        }
        /* fall through */
    case 1:
        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
        if (arm_hcr_el2_eff(env) & HCR_TPU) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}

/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static int vae1_tlbmask(CPUARMState *env)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    uint16_t mask;

    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mask = ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E20_0;
    } else {
        mask = ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mask >>= ARM_MMU_IDX_A_NS;
    }

    return mask;
}

/* Return 56 if TBI is enabled, 64 otherwise. */
static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
                              uint64_t addr)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    int select = extract64(addr, 55, 1);

    return (tbi >> select) & 1 ? 56 : 64;
}

static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
{
    uint64_t hcr = arm_hcr_el2_eff(env);
    ARMMMUIdx mmu_idx;

    /* Only the regime of the mmu_idx below is significant. */
    if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
        mmu_idx = ARMMMUIdx_E20_0;
    } else {
        mmu_idx = ARMMMUIdx_E10_0;
    }

    if (arm_is_secure_below_el3(env)) {
        mmu_idx &= ~ARM_MMU_IDX_A_NS;
    }

    return tlbbits_for_regime(env, mmu_idx, addr);
}
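
/*
 * Example: with TBI enabled for the half of the address space
 * selected by bit 55, only bits [55:0] of an address participate in
 * TLB lookup, so a by-VA flush can match on 56 bits and ignore the
 * tag byte; otherwise all 64 bits are significant.
 */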

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
    } else {
        tlb_flush_by_mmuidx(cs, mask);
    }
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE10_1 |
               ARMMMUIdxBit_SE10_1_PAN |
               ARMMMUIdxBit_SE10_0;
    } else {
        return ARMMMUIdxBit_E10_1 |
               ARMMMUIdxBit_E10_1_PAN |
               ARMMMUIdxBit_E10_0;
    }
}

static int e2_tlbmask(CPUARMState *env)
{
    if (arm_is_secure_below_el3(env)) {
        return ARMMMUIdxBit_SE20_0 |
               ARMMMUIdxBit_SE20_2 |
               ARMMMUIdxBit_SE20_2_PAN |
               ARMMMUIdxBit_SE2;
    } else {
        return ARMMMUIdxBit_E20_0 |
               ARMMMUIdxBit_E20_2 |
               ARMMMUIdxBit_E20_2_PAN |
               ARMMMUIdxBit_E2;
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx(cs, mask);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = alle1_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = e2_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    CPUState *cs = env_cpu(env);
    int mask = vae1_tlbmask(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = vae1_tlbbits(env, pageaddr);

    if (tlb_force_broadcast(env)) {
        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
    } else {
        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    bool secure = arm_is_secure_below_el3(env);
    int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
    int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
                                  pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);
    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);

    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                  ARMMMUIdxBit_SE3, bits);
}

#ifdef TARGET_AARCH64
typedef struct {
    uint64_t base;
    uint64_t length;
} TLBIRange;

static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
                                     uint64_t value)
{
    unsigned int page_size_granule, page_shift, num, scale, exponent;
    /* Extract one bit to represent the va selector in use. */
    uint64_t select = sextract64(value, 36, 1);
    ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true);
    TLBIRange ret = { };

    page_size_granule = extract64(value, 46, 2);

    /* The granule encoded in value must match the granule in use. */
    if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
                      page_size_granule);
        return ret;
    }

    page_shift = (page_size_granule - 1) * 2 + 12;
    num = extract64(value, 39, 5);
    scale = extract64(value, 44, 2);
    exponent = (5 * scale) + 1;

    ret.length = (num + 1) << (exponent + page_shift);

    if (param.select) {
        ret.base = sextract64(value, 0, 37);
    } else {
        ret.base = extract64(value, 0, 37);
    }
    if (param.ds) {
        /*
         * With DS=1, BaseADDR is always shifted 16 so that it is able
         * to address all 52 va bits. The input address is perforce
         * aligned on a 64k boundary regardless of translation granule.
         */
        page_shift = 16;
    }
    ret.base <<= page_shift;

    return ret;
}
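
/*
 * Worked decode: for a range-TLBI value with a 4K granule (TG = 0b01,
 * so page_shift = 12), SCALE = 0 and NUM = 0, the exponent is
 * 5 * 0 + 1 = 1 and the length is (0 + 1) << (1 + 12) = 8KB, i.e. the
 * minimum range of two 4K pages starting at BaseADDR << 12.
 */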

static void do_rvae_write(CPUARMState *env, uint64_t value,
                          int idxmap, bool synced)
{
    ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
    TLBIRange range;
    int bits;

    range = tlbi_aa64_get_range(env, one_idx, value);
    bits = tlbbits_for_regime(env, one_idx, range.base);

    if (synced) {
        tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
                                                  range.base,
                                                  range.length,
                                                  idxmap,
                                                  bits);
    } else {
        tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
                                  range.length, idxmap, bits);
    }
}

static void tlbi_aa64_rvae1_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL1&0.
     * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae1_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae1is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable EL1&0.
     * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
     * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
     * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
     * shareable specific flushes.
     */

    do_rvae_write(env, value, vae1_tlbmask(env), true);
}

static int vae2_tlbmask(CPUARMState *env)
{
    return (arm_is_secure_below_el3(env)
            ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
}

static void tlbi_aa64_rvae2_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL2.
     * Currently handles all of RVAE2 and RVALE2,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, vae2_tlbmask(env),
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae2is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, Inner/Outer Shareable, EL2.
     * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer shareable specific flushes.
     */

    do_rvae_write(env, value, vae2_tlbmask(env), true);
}

static void tlbi_aa64_rvae3_write(CPUARMState *env,
                                  const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /*
     * Invalidate by VA range, EL3.
     * Currently handles all of RVAE3 and RVALE3,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3,
                  tlb_force_broadcast(env));
}

static void tlbi_aa64_rvae3is_write(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * Invalidate by VA range, EL3, Inner/Outer Shareable.
     * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
     * since we don't support flush-for-specific-ASID-only,
     * flush-last-level-only or inner/outer specific flushes.
     */

    do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
}
#endif
4574
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TDZ) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TDZ) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

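/*
 * DCZID_EL0 reports the DC ZVA block size in its low bits (taken from
 * cpu->dcz_blocksize) and sets bit 4 (DZP) when DC ZVA is currently
 * prohibited, i.e. when the access check above would not return
 * CP_ACCESS_OK.
 */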
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

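/*
 * SCTLR writes are expensive to emulate: the M bit can turn the MMU on
 * or off, so any real change must flush the TLB, and writes normally
 * end the current TB so that the cached hflags are recomputed.
 */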
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    /* ??? Lots of these bits are not implemented. */

    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
        if (ri->opc1 == 6) { /* SCTLR_EL3 */
            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
        } else {
            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
                       SCTLR_ATA0 | SCTLR_ATA);
        }
    }

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    raw_write(env, ri, value);

    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));

    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
        /*
         * Normally we would always end the TB on an SCTLR write; see the
         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
         * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
         * of hflags from the translator, so do it here.
         */
        arm_rebuild_hflags(env);
    }
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

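/*
 * In the table below, registers whose only effect is a side effect of
 * the write (the TLBI and cache maintenance ops) are ARM_CP_NO_RAW or
 * ARM_CP_NOP: they have no underlying state to migrate. The accessfns
 * (access_ttlb, access_tsw, aa64_cacheop_pou_access, ...) implement the
 * corresponding HCR_EL2/SCR_EL3 trap bits for these operations.
 */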
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
      .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_pou_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_poc_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NOP },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL2_W },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};

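/*
 * HCR_EL2 writes funnel through do_hcr_write() with a valid_mask that
 * starts from the caller's mask and grows with each implemented
 * feature; anything outside the mask is RES0 and is silently cleared.
 */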
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
    ARMCPU *cpu = env_archcpu(env);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
    } else {
        valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        if (cpu_isar_feature(aa64_vh, cpu)) {
            valid_mask |= HCR_E2H;
        }
        if (cpu_isar_feature(aa64_ras, cpu)) {
            valid_mask |= HCR_TERR | HCR_TEA;
        }
        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= HCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= HCR_API | HCR_APK;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
        }
        if (cpu_isar_feature(aa64_scxtnum, cpu)) {
            valid_mask |= HCR_ENSCXT;
        }
        if (cpu_isar_feature(aa64_fwb, cpu)) {
            valid_mask |= HCR_FWB;
        }
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /*
     * These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     * HCR_DCT enables tagging on (disabled) stage1 translation
     * HCR_FWB changes the interpretation of stage2 descriptor bits
     */
    if ((env->cp15.hcr_el2 ^ value) &
        (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
    arm_cpu_update_vserr(cpu);
}

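/*
 * AArch32 sees HCR_EL2 as the HCR/HCR2 pair. Each 32-bit write below is
 * merged into the 64-bit value with deposit64(), and the untouched half
 * is passed in via valid_mask so that do_hcr_write() does not clear it
 * as RES0.
 */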
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    do_hcr_write(env, value, 0);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
}

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (!arm_is_el2_enabled(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state". This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        return 0;
    }

    /*
     * For a cpu that supports both aarch64 and aarch32, we can set bits
     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
     */
    if (!arm_el_is_aa64(env, 2)) {
        uint64_t aa32_valid;

        /*
         * These bits are up-to-date as of ARMv8.6.
         * For HCR, it's easiest to list just the 2 bits that are invalid.
         * For HCR2, list those that are valid.
         */
        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
        ret &= aa32_valid;
    }

    if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.6. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

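/*
 * Callers typically combine arm_hcr_el2_eff() with the current EL to
 * decide trap routing, e.g.:
 *
 *     if (arm_current_el(env) < 2 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
 *         return CP_ACCESS_TRAP_EL2;
 *     }
 *
 * (this exact pattern appears in ctr_el0_access() below).
 */
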
/*
 * Corresponds to ARM pseudocode function ELIsInHost().
 */
bool el_is_in_host(CPUARMState *env, int el)
{
    uint64_t mask;

    /*
     * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
     * Perform the simplest bit tests first, and validate EL2 afterward.
     */
    if (el & 1) {
        return false; /* EL1 or EL3 */
    }

    /*
     * Note that hcr_write() checks isar_feature_aa64_vh(),
     * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
     */
    mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
    if ((env->cp15.hcr_el2 & mask) != mask) {
        return false;
    }

    /* TGE and/or E2H set: double check those bits are currently legal. */
    return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
}

static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    uint64_t valid_mask = 0;

    /* No features adding bits to HCRX are implemented. */

    /* Clear RES0 bits. */
    env->cp15.hcrx_el2 = value & valid_mask;
}

static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_HXEN)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
    .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
    .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
    .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};

/* Return the effective value of HCRX_EL2. */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
    /*
     * The bits in this register behave as 0 for all purposes other than
     * direct reads of the register if:
     * - EL2 is not enabled in the current security state,
     * - SCR_EL3.HXEn is 0.
     */
    if (!arm_is_el2_enabled(env)
        || (arm_feature(env, ARM_FEATURE_EL3)
            && !(env->cp15.scr_el3 & SCR_HXEN))) {
        return 0;
    }
    return env->cp15.hcrx_el2;
}

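/*
 * Note that with valid_mask currently 0 in hcrx_write(), hcrx_el2 can
 * only ever hold 0, so arm_hcrx_el2_eff() returns 0 in all cases for
 * now; the plumbing exists so that future feature bits only need to be
 * added to the mask.
 */
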
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
        value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
    }
    return value;
}

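/*
 * The read/write pair above together make HCPTR.{TCP11,TCP10} behave as
 * RAO/WI from the Non-secure side whenever AArch32 EL3 clears
 * NSACR.CP10: writes cannot clear the bits, and reads always see them
 * set.
 */
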
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
      .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
};

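/*
 * HCR2 (the AArch32 view of HCR_EL2[63:32]) only exists from ARMv8, so
 * it lives in its own list rather than in el2_cp_reginfo above.
 */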
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
};

static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
    { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
    { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
      .access = PL2_RW, .accessfn = sel2_access,
      .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
};

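/*
 * VSTTBR_EL2/VSTCR_EL2 above are the Secure EL2 stage 2 translation
 * controls; sel2_access() restricts them to EL3 or to Secure state
 * below EL3.
 */
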
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3 or EL2.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetfn = scr_reset, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change */
      .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
};

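/*
 * FEAT_VHE register redirection: when HCR_EL2.E2H is set and we are at
 * EL2, accesses to many EL1 system registers are redirected to their
 * EL2 counterparts, and new *_EL12 encodings reach the real EL1
 * registers. This is implemented below by stashing the EL2 reginfo in
 * the EL1 register's .opaque field and interposing el2_e2h_read/write,
 * which pick the right target at access time.
 */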
#ifndef CONFIG_USER_ONLY
/* Test if system register redirection is to occur in the current state. */
static bool redirect_for_e2h(CPUARMState *env)
{
    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
}

static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPReadFn *readfn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        readfn = ri->readfn;
    } else {
        readfn = ri->orig_readfn;
    }
    if (readfn == NULL) {
        readfn = raw_read;
    }
    return readfn(env, ri);
}

static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    CPWriteFn *writefn;

    if (redirect_for_e2h(env)) {
        /* Switch to the saved EL2 version of the register. */
        ri = ri->opaque;
        writefn = ri->writefn;
    } else {
        writefn = ri->orig_writefn;
    }
    if (writefn == NULL) {
        writefn = raw_write;
    }
    writefn(env, ri, value);
}

5727static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5728{
5729 struct E2HAlias {
5730 uint32_t src_key, dst_key, new_key;
5731 const char *src_name, *dst_name, *new_name;
5732 bool (*feature)(const ARMISARegisters *id);
5733 };
5734
5735#define K(op0, op1, crn, crm, op2) \
5736 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5737
5738 static const struct E2HAlias aliases[] = {
5739 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
5740 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5741 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
5742 "CPACR", "CPTR_EL2", "CPACR_EL12" },
5743 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
5744 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5745 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
5746 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5747 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
5748 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5749 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
5750 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5751 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
5752 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5753 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
5754 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5755 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
5756 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5757 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
5758 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5759 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
5760 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5761 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5762 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5763 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5764 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5765 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5766 "VBAR", "VBAR_EL2", "VBAR_EL12" },
5767 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5768 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5769 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5770 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5771
5772 /*
5773 * Note that redirection of ZCR is mentioned in the description
5774 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5775 * not in the summary table.
5776 */
5777 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
5778 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
de561988
RH
5779 { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
5780 "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
e2cce18f 5781
4b779ceb
RH
5782 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
5783 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
5784
7cb1e618
RH
5785 { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
5786 "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
5787 isar_feature_aa64_scxtnum },
5788
e2cce18f
RH
5789 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5790 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5791 };
5792#undef K
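    /*
     * Illustrative note (an assumption about the encoding, not new code):
     * K() packs an (op0, op1, CRn, CRm, op2) tuple into the same 32-bit
     * hash key that ENCODE_AA64_CP_REG produces for MRS/MSR lookups, so
     * e.g. K(3, 4, 1, 0, 0) is the key under which SCTLR_EL2 was
     * registered and K(3, 5, 1, 0, 0) becomes the new key for SCTLR_EL12.
     */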
5793
5794 size_t i;
5795
5796 for (i = 0; i < ARRAY_SIZE(aliases); i++) {
5797 const struct E2HAlias *a = &aliases[i];
9da35a40 5798 ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
9da35a40 5799 bool ok;
e2cce18f
RH
5800
5801 if (a->feature && !a->feature(&cpu->isar)) {
5802 continue;
5803 }
5804
5860362d
RH
5805 src_reg = g_hash_table_lookup(cpu->cp_regs,
5806 (gpointer)(uintptr_t)a->src_key);
5807 dst_reg = g_hash_table_lookup(cpu->cp_regs,
5808 (gpointer)(uintptr_t)a->dst_key);
e2cce18f
RH
5809 g_assert(src_reg != NULL);
5810 g_assert(dst_reg != NULL);
5811
5812 /* Cross-compare names to detect typos in the keys. */
5813 g_assert(strcmp(src_reg->name, a->src_name) == 0);
5814 g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
5815
5816 /* None of the core system registers use opaque; we will. */
5817 g_assert(src_reg->opaque == NULL);
5818
5819 /* Create alias before redirection so we dup the right data. */
9da35a40 5820 new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
9da35a40
RH
5821
5822 new_reg->name = a->new_name;
5823 new_reg->type |= ARM_CP_ALIAS;
5824 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
5825 new_reg->access &= PL2_RW | PL3_RW;
5826
5860362d
RH
5827 ok = g_hash_table_insert(cpu->cp_regs,
5828 (gpointer)(uintptr_t)a->new_key, new_reg);
9da35a40 5829 g_assert(ok);
e2cce18f
RH
5830
5831 src_reg->opaque = dst_reg;
5832 src_reg->orig_readfn = src_reg->readfn ?: raw_read;
5833 src_reg->orig_writefn = src_reg->writefn ?: raw_write;
5834 if (!src_reg->raw_readfn) {
5835 src_reg->raw_readfn = raw_read;
5836 }
5837 if (!src_reg->raw_writefn) {
5838 src_reg->raw_writefn = raw_write;
5839 }
5840 src_reg->readfn = el2_e2h_read;
5841 src_reg->writefn = el2_e2h_write;
5842 }
5843}
5844#endif
5845
3f208fd7
PM
5846static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5847 bool isread)
7da845b0 5848{
97475a89
RH
5849 int cur_el = arm_current_el(env);
5850
5851 if (cur_el < 2) {
5852 uint64_t hcr = arm_hcr_el2_eff(env);
5853
5854 if (cur_el == 0) {
5855 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5856 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
5857 return CP_ACCESS_TRAP_EL2;
5858 }
5859 } else {
5860 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5861 return CP_ACCESS_TRAP;
5862 }
5863 if (hcr & HCR_TID2) {
5864 return CP_ACCESS_TRAP_EL2;
5865 }
5866 }
5867 } else if (hcr & HCR_TID2) {
5868 return CP_ACCESS_TRAP_EL2;
5869 }
7da845b0 5870 }
630fcd4d
MZ
5871
5872 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
5873 return CP_ACCESS_TRAP_EL2;
5874 }
5875
7da845b0
PM
5876 return CP_ACCESS_OK;
5877}
5878
58e93b48
RH
5879/*
5880 * Check for traps to RAS registers, which are controlled
5881 * by HCR_EL2.TERR and SCR_EL3.TERR.
5882 */
5883static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
5884 bool isread)
5885{
5886 int el = arm_current_el(env);
5887
5888 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
5889 return CP_ACCESS_TRAP_EL2;
5890 }
5891 if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
5892 return CP_ACCESS_TRAP_EL3;
5893 }
5894 return CP_ACCESS_OK;
5895}
5896
5897static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
5898{
5899 int el = arm_current_el(env);
5900
5901 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
5902 return env->cp15.vdisr_el2;
5903 }
5904 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
5905 return 0; /* RAZ/WI */
5906 }
5907 return env->cp15.disr_el1;
5908}
5909
5910static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5911{
5912 int el = arm_current_el(env);
5913
5914 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
5915 env->cp15.vdisr_el2 = val;
5916 return;
5917 }
5918 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
5919 return; /* RAZ/WI */
5920 }
5921 env->cp15.disr_el1 = val;
5922}
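/*
 * Example of the redirection implemented above: with HCR_EL2.AMO set,
 * an EL1 read of DISR_EL1 returns VDISR_EL2 instead, which is how a
 * hypervisor exposes a virtual SError syndrome to its guest; the real
 * DISR_EL1 state is only reachable once neither the AMO nor the EA
 * redirection applies.
 */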
5923
5924/*
5925 * Minimal RAS implementation with no Error Records.
 5926 * This means that all of the Error Record registers:
5927 * ERXADDR_EL1
5928 * ERXCTLR_EL1
5929 * ERXFR_EL1
5930 * ERXMISC0_EL1
5931 * ERXMISC1_EL1
5932 * ERXMISC2_EL1
5933 * ERXMISC3_EL1
5934 * ERXPFGCDN_EL1 (RASv1p1)
5935 * ERXPFGCTL_EL1 (RASv1p1)
5936 * ERXPFGF_EL1 (RASv1p1)
5937 * ERXSTATUS_EL1
5938 * and
5939 * ERRSELR_EL1
5940 * may generate UNDEFINED, which is the effect we get by not
5941 * listing them at all.
5942 */
5943static const ARMCPRegInfo minimal_ras_reginfo[] = {
5944 { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
5945 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
5946 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
5947 .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
5948 { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
5949 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
5950 .access = PL1_R, .accessfn = access_terr,
5951 .type = ARM_CP_CONST, .resetvalue = 0 },
5952 { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
5953 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
5954 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
5955 { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
5956 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
5957 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
5958};
5959
397d922c
RH
5960/*
5961 * Return the exception level to which exceptions should be taken
5962 * via SVEAccessTrap. This excludes the check for whether the exception
5963 * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily
5964 * be found by testing 0 < fp_exception_el < sve_exception_el.
5965 *
 5966 * Cf. the ARM pseudocode function CheckSVEEnabled. Note that the
5967 * pseudocode does *not* separate out the FP trap checks, but has them
5968 * all in one function.
5be5e8ed 5969 */
ced31551 5970int sve_exception_el(CPUARMState *env, int el)
5be5e8ed
RH
5971{
5972#ifndef CONFIG_USER_ONLY
aa4451b6 5973 if (el <= 1 && !el_is_in_host(env, el)) {
fab8ad39 5974 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
7701cee5
RH
5975 case 1:
5976 if (el != 0) {
5977 break;
5978 }
5979 /* fall through */
5980 case 0:
5981 case 2:
61a8c23a 5982 return 1;
5be5e8ed 5983 }
5be5e8ed
RH
5984 }
5985
7d38cb92
RH
5986 if (el <= 2 && arm_is_el2_enabled(env)) {
5987 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
5988 if (env->cp15.hcr_el2 & HCR_E2H) {
fab8ad39 5989 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
d5a6fa2d 5990 case 1:
7d38cb92 5991 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
d5a6fa2d
RH
5992 break;
5993 }
5994 /* fall through */
5995 case 0:
5996 case 2:
5997 return 2;
5998 }
7d38cb92 5999 } else {
fab8ad39 6000 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
d5a6fa2d
RH
6001 return 2;
6002 }
60eed086 6003 }
5be5e8ed
RH
6004 }
6005
60eed086
RH
6006 /* CPTR_EL3. Since EZ is negative we must check for EL3. */
6007 if (arm_feature(env, ARM_FEATURE_EL3)
fab8ad39 6008 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
5be5e8ed
RH
6009 return 3;
6010 }
6011#endif
6012 return 0;
6013}
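/*
 * A sketch of the routing test described above (illustrative only;
 * the exact caller code is an assumption, not taken from this file):
 *
 *     int fp_el = fp_exception_el(env, el);
 *     int sve_el = sve_exception_el(env, el);
 *
 *     if (fp_el != 0 && fp_el < sve_el) {
 *         // route via AArch64.AdvSIMDFPAccessTrap to fp_el
 *     } else if (sve_el != 0) {
 *         // route via SVEAccessTrap to sve_el
 *     }
 */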
6014
6b2ca83e
RH
6015/*
6016 * Return the exception level to which exceptions should be taken for SME.
 6017 * Cf. the ARM pseudocode function CheckSMEAccess.
6018 */
6019int sme_exception_el(CPUARMState *env, int el)
6020{
6021#ifndef CONFIG_USER_ONLY
6022 if (el <= 1 && !el_is_in_host(env, el)) {
6023 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
6024 case 1:
6025 if (el != 0) {
6026 break;
6027 }
6028 /* fall through */
6029 case 0:
6030 case 2:
6031 return 1;
6032 }
6033 }
6034
6035 if (el <= 2 && arm_is_el2_enabled(env)) {
6036 /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
6037 if (env->cp15.hcr_el2 & HCR_E2H) {
6038 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
6039 case 1:
6040 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
6041 break;
6042 }
6043 /* fall through */
6044 case 0:
6045 case 2:
6046 return 2;
6047 }
6048 } else {
6049 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
6050 return 2;
6051 }
6052 }
6053 }
6054
6055 /* CPTR_EL3. Since ESM is negative we must check for EL3. */
6056 if (arm_feature(env, ARM_FEATURE_EL3)
6057 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6058 return 3;
6059 }
6060#endif
6061 return 0;
6062}
6063
75fe8356
RH
6064/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). */
6065static bool sme_fa64(CPUARMState *env, int el)
6066{
6067 if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) {
6068 return false;
6069 }
6070
6071 if (el <= 1 && !el_is_in_host(env, el)) {
6072 if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) {
6073 return false;
6074 }
6075 }
6076 if (el <= 2 && arm_is_el2_enabled(env)) {
6077 if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) {
6078 return false;
6079 }
6080 }
6081 if (arm_feature(env, ARM_FEATURE_EL3)) {
6082 if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) {
6083 return false;
6084 }
6085 }
6086
6087 return true;
6088}
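/*
 * In effect FA64 is the AND of SMCR_ELx.FA64 across every EL that
 * constrains the current one: e.g. for EL0 outside the host regime,
 * with EL2 enabled and EL3 implemented, SMCR_EL1.FA64, SMCR_EL2.FA64
 * and SMCR_EL3.FA64 must all be set for the full A64 instruction set
 * to remain usable in streaming mode.
 */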
6089
0ab5953b
RH
6090/*
 6091 * Given that SVE is enabled, return the vector length for the given EL.
6092 */
6ca54aa9 6093uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
0ab5953b 6094{
2fc0cc0e 6095 ARMCPU *cpu = env_archcpu(env);
6ca54aa9
RH
6096 uint64_t *cr = env->vfp.zcr_el;
6097 uint32_t map = cpu->sve_vq.map;
6098 uint32_t len = ARM_MAX_VQ - 1;
6099
6100 if (sm) {
6101 cr = env->vfp.smcr_el;
6102 map = cpu->sme_vq.map;
6103 }
0ab5953b 6104
c6225beb 6105 if (el <= 1 && !el_is_in_host(env, el)) {
6ca54aa9 6106 len = MIN(len, 0xf & (uint32_t)cr[1]);
0ab5953b 6107 }
6a02a732 6108 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6ca54aa9 6109 len = MIN(len, 0xf & (uint32_t)cr[2]);
0ab5953b 6110 }
6a02a732 6111 if (arm_feature(env, ARM_FEATURE_EL3)) {
6ca54aa9
RH
6112 len = MIN(len, 0xf & (uint32_t)cr[3]);
6113 }
6114
6115 map &= MAKE_64BIT_MASK(0, len + 1);
6116 if (map != 0) {
6117 return 31 - clz32(map);
0ab5953b 6118 }
0df9142d 6119
6ca54aa9
RH
6120 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
6121 assert(sm);
6122 return ctz32(cpu->sme_vq.map);
6123}
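/*
 * Worked example: with ZCR_EL1.LEN = 3 and a supported-vq map of
 * 0b1011 (vq 1, 2 and 4 available), len is clamped from 15 to 3, the
 * mask keeps bits [3:0] of the map, and 31 - clz32(0b1011) = 3 is
 * returned, i.e. vq - 1 = 3, a 512-bit vector length.
 */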
6124
6125uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
6126{
6127 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
0ab5953b
RH
6128}
6129
5be5e8ed
RH
6130static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6131 uint64_t value)
6132{
0ab5953b 6133 int cur_el = arm_current_el(env);
5ef3cc56 6134 int old_len = sve_vqm1_for_el(env, cur_el);
0ab5953b
RH
6135 int new_len;
6136
5be5e8ed 6137 /* Bits other than [3:0] are RAZ/WI. */
7b351d98 6138 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
5be5e8ed 6139 raw_write(env, ri, value & 0xf);
0ab5953b
RH
6140
6141 /*
6142 * Because we arrived here, we know both FP and SVE are enabled;
6143 * otherwise we would have trapped access to the ZCR_ELn register.
6144 */
5ef3cc56 6145 new_len = sve_vqm1_for_el(env, cur_el);
0ab5953b
RH
6146 if (new_len < old_len) {
6147 aarch64_sve_narrow_vq(env, new_len + 1);
6148 }
5be5e8ed
RH
6149}
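/*
 * For example, dropping ZCR_EL1.LEN from 3 to 1 at EL1 (assuming vq 2
 * is in the supported map) shrinks the effective vector length from
 * 512 to 256 bits; aarch64_sve_narrow_vq then zeroes the Zreg/Preg
 * state beyond the new length so that a later widening cannot observe
 * stale data.
 */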
6150
60360d82
RH
6151static const ARMCPRegInfo zcr_reginfo[] = {
6152 { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6153 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6154 .access = PL1_RW, .type = ARM_CP_SVE,
6155 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6156 .writefn = zcr_write, .raw_writefn = raw_write },
6157 { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6158 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6159 .access = PL2_RW, .type = ARM_CP_SVE,
6160 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6161 .writefn = zcr_write, .raw_writefn = raw_write },
6162 { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6163 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6164 .access = PL3_RW, .type = ARM_CP_SVE,
6165 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6166 .writefn = zcr_write, .raw_writefn = raw_write },
5be5e8ed
RH
6167};
6168
9e5ec745
RH
6169#ifdef TARGET_AARCH64
6170static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
6171 bool isread)
6172{
6173 int el = arm_current_el(env);
6174
6175 if (el == 0) {
6176 uint64_t sctlr = arm_sctlr(env, el);
6177 if (!(sctlr & SCTLR_EnTP2)) {
6178 return CP_ACCESS_TRAP;
6179 }
6180 }
6181 /* TODO: FEAT_FGT */
6182 if (el < 3
6183 && arm_feature(env, ARM_FEATURE_EL3)
6184 && !(env->cp15.scr_el3 & SCR_ENTP2)) {
6185 return CP_ACCESS_TRAP_EL3;
6186 }
6187 return CP_ACCESS_OK;
6188}
6189
d5b1223a
RH
6190static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri,
6191 bool isread)
6192{
6193 /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */
6194 if (arm_current_el(env) < 3
6195 && arm_feature(env, ARM_FEATURE_EL3)
6196 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
6197 return CP_ACCESS_TRAP_EL3;
6198 }
6199 return CP_ACCESS_OK;
6200}
6201
c37e6ac9
RH
6202static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6203 uint64_t value)
6204{
f84734b8
RH
6205 helper_set_pstate_sm(env, FIELD_EX64(value, SVCR, SM));
6206 helper_set_pstate_za(env, FIELD_EX64(value, SVCR, ZA));
6207 arm_rebuild_hflags(env);
c37e6ac9
RH
6208}
6209
de561988
RH
6210static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6211 uint64_t value)
6212{
6213 int cur_el = arm_current_el(env);
6214 int old_len = sve_vqm1_for_el(env, cur_el);
6215 int new_len;
6216
6217 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
6218 value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
6219 raw_write(env, ri, value);
6220
6221 /*
6222 * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
6223 * when SVL is widened (old values kept, or zeros). Choose to keep the
6224 * current values for simplicity. But for QEMU internals, we must still
6225 * apply the narrower SVL to the Zregs and Pregs -- see the comment
6226 * above aarch64_sve_narrow_vq.
6227 */
6228 new_len = sve_vqm1_for_el(env, cur_el);
6229 if (new_len < old_len) {
6230 aarch64_sve_narrow_vq(env, new_len + 1);
6231 }
6232}
6233
9e5ec745
RH
6234static const ARMCPRegInfo sme_reginfo[] = {
6235 { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
6236 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
6237 .access = PL0_RW, .accessfn = access_tpidr2,
6238 .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
c37e6ac9
RH
6239 { .name = "SVCR", .state = ARM_CP_STATE_AA64,
6240 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
6241 .access = PL0_RW, .type = ARM_CP_SME,
6242 .fieldoffset = offsetof(CPUARMState, svcr),
6243 .writefn = svcr_write, .raw_writefn = raw_write },
de561988
RH
6244 { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
6245 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
6246 .access = PL1_RW, .type = ARM_CP_SME,
6247 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
6248 .writefn = smcr_write, .raw_writefn = raw_write },
6249 { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
6250 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
6251 .access = PL2_RW, .type = ARM_CP_SME,
6252 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
6253 .writefn = smcr_write, .raw_writefn = raw_write },
6254 { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
6255 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
6256 .access = PL3_RW, .type = ARM_CP_SME,
6257 .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
6258 .writefn = smcr_write, .raw_writefn = raw_write },
d5b1223a
RH
6259 { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
6260 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
6261 .access = PL1_R, .accessfn = access_aa64_tid1,
6262 /*
6263 * IMPLEMENTOR = 0 (software)
6264 * REVISION = 0 (implementation defined)
6265 * SMPS = 0 (no streaming execution priority in QEMU)
 6266 * AFFINITY = 0 (streaming SVE mode not shared with other PEs)
6267 */
6268 .type = ARM_CP_CONST, .resetvalue = 0, },
6269 /*
 6270 * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES0.
6271 */
6272 { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
6273 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
6274 .access = PL1_RW, .accessfn = access_esm,
6275 .type = ARM_CP_CONST, .resetvalue = 0 },
6276 { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
6277 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
6278 .access = PL2_RW, .accessfn = access_esm,
6279 .type = ARM_CP_CONST, .resetvalue = 0 },
9e5ec745
RH
6280};
6281#endif /* TARGET_AARCH64 */
6282
24183fb6
PM
6283static void define_pmu_regs(ARMCPU *cpu)
6284{
6285 /*
6286 * v7 performance monitor control register: same implementor
 6287 * field as main ID register, and we implement as many event
 6288 * counters as pmu_num_counters() reports, plus the cycle count register.
6289 */
24526bb9 6290 unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
24183fb6
PM
6291 ARMCPRegInfo pmcr = {
6292 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6293 .access = PL0_RW,
6294 .type = ARM_CP_IO | ARM_CP_ALIAS,
6295 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6296 .accessfn = pmreg_access, .writefn = pmcr_write,
6297 .raw_writefn = raw_write,
6298 };
6299 ARMCPRegInfo pmcr64 = {
6300 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6301 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6302 .access = PL0_RW, .accessfn = pmreg_access,
6303 .type = ARM_CP_IO,
6304 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
24526bb9 6305 .resetvalue = cpu->isar.reset_pmcr_el0,
24183fb6
PM
6306 .writefn = pmcr_write, .raw_writefn = raw_write,
6307 };
24526bb9 6308
24183fb6
PM
6309 define_one_arm_cp_reg(cpu, &pmcr);
6310 define_one_arm_cp_reg(cpu, &pmcr64);
6311 for (i = 0; i < pmcrn; i++) {
6312 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6313 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6314 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6315 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
6316 ARMCPRegInfo pmev_regs[] = {
6317 { .name = pmevcntr_name, .cp = 15, .crn = 14,
6318 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6319 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6320 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
99a50d1a 6321 .accessfn = pmreg_access_xevcntr },
24183fb6
PM
6322 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6323 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
99a50d1a 6324 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
24183fb6
PM
6325 .type = ARM_CP_IO,
6326 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6327 .raw_readfn = pmevcntr_rawread,
6328 .raw_writefn = pmevcntr_rawwrite },
6329 { .name = pmevtyper_name, .cp = 15, .crn = 14,
6330 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6331 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6332 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6333 .accessfn = pmreg_access },
6334 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6335 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6336 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6337 .type = ARM_CP_IO,
6338 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6339 .raw_writefn = pmevtyper_rawwrite },
24183fb6
PM
6340 };
6341 define_arm_cp_regs(cpu, pmev_regs);
6342 g_free(pmevcntr_name);
6343 g_free(pmevcntr_el0_name);
6344 g_free(pmevtyper_name);
6345 g_free(pmevtyper_el0_name);
6346 }
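    /*
     * The crm/opc2 arithmetic above follows the architected encoding of
     * PMEVCNTR<n>_EL0 and PMEVTYPER<n>_EL0: CRm carries n[4:3] on top of
     * the base value (8 or 12) and opc2 carries n[2:0]. For example,
     * i = 10 gives .crm = 8 | (10 >> 3) = 9 and .opc2 = 10 & 7 = 2,
     * which is the encoding of PMEVCNTR10_EL0.
     */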
a6179538 6347 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
24183fb6
PM
6348 ARMCPRegInfo v81_pmu_regs[] = {
6349 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6350 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6351 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6352 .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6353 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6354 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6355 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6356 .resetvalue = extract64(cpu->pmceid1, 32, 32) },
24183fb6
PM
6357 };
6358 define_arm_cp_regs(cpu, v81_pmu_regs);
6359 }
15dd1ebd
PM
6360 if (cpu_isar_feature(any_pmu_8_4, cpu)) {
6361 static const ARMCPRegInfo v84_pmmir = {
6362 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6363 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6364 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6365 .resetvalue = 0
6366 };
6367 define_one_arm_cp_reg(cpu, &v84_pmmir);
6368 }
24183fb6
PM
6369}
6370
96a8b92e
PM
6371/* We don't know until after realize whether there's a GICv3
6372 * attached, and that is what registers the gicv3 sysregs.
 6373 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1
6374 * at runtime.
6375 */
6376static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6377{
2fc0cc0e 6378 ARMCPU *cpu = env_archcpu(env);
8a130a7b 6379 uint64_t pfr1 = cpu->isar.id_pfr1;
96a8b92e
PM
6380
6381 if (env->gicv3state) {
6382 pfr1 |= 1 << 28;
6383 }
6384 return pfr1;
6385}
6386
976b99b6 6387#ifndef CONFIG_USER_ONLY
96a8b92e
PM
6388static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6389{
2fc0cc0e 6390 ARMCPU *cpu = env_archcpu(env);
47576b94 6391 uint64_t pfr0 = cpu->isar.id_aa64pfr0;
96a8b92e
PM
6392
6393 if (env->gicv3state) {
6394 pfr0 |= 1 << 24;
6395 }
6396 return pfr0;
6397}
976b99b6 6398#endif
96a8b92e 6399
2d7137c1 6400/* Shared logic between LORID and the rest of the LOR* registers.
9bd268ba 6401 * Secure state exclusion has already been dealt with.
2d7137c1 6402 */
9bd268ba
RDC
6403static CPAccessResult access_lor_ns(CPUARMState *env,
6404 const ARMCPRegInfo *ri, bool isread)
2d7137c1
RH
6405{
6406 int el = arm_current_el(env);
6407
6408 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6409 return CP_ACCESS_TRAP_EL2;
6410 }
6411 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6412 return CP_ACCESS_TRAP_EL3;
6413 }
6414 return CP_ACCESS_OK;
6415}
6416
2d7137c1
RH
6417static CPAccessResult access_lor_other(CPUARMState *env,
6418 const ARMCPRegInfo *ri, bool isread)
6419{
6420 if (arm_is_secure_below_el3(env)) {
6421 /* Access denied in secure mode. */
6422 return CP_ACCESS_TRAP;
6423 }
9bd268ba 6424 return access_lor_ns(env, ri, isread);
2d7137c1
RH
6425}
6426
d8564ee4
RH
6427/*
6428 * A trivial implementation of ARMv8.1-LOR leaves all of these
6429 * registers fixed at 0, which indicates that there are zero
6430 * supported Limited Ordering regions.
6431 */
6432static const ARMCPRegInfo lor_reginfo[] = {
6433 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6434 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6435 .access = PL1_RW, .accessfn = access_lor_other,
6436 .type = ARM_CP_CONST, .resetvalue = 0 },
6437 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6438 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6439 .access = PL1_RW, .accessfn = access_lor_other,
6440 .type = ARM_CP_CONST, .resetvalue = 0 },
6441 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6442 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6443 .access = PL1_RW, .accessfn = access_lor_other,
6444 .type = ARM_CP_CONST, .resetvalue = 0 },
6445 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6446 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6447 .access = PL1_RW, .accessfn = access_lor_other,
6448 .type = ARM_CP_CONST, .resetvalue = 0 },
6449 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6450 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
9bd268ba 6451 .access = PL1_R, .accessfn = access_lor_ns,
d8564ee4 6452 .type = ARM_CP_CONST, .resetvalue = 0 },
d8564ee4
RH
6453};
6454
967aa94f
RH
6455#ifdef TARGET_AARCH64
6456static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6457 bool isread)
6458{
6459 int el = arm_current_el(env);
6460
6461 if (el < 2 &&
07b034ea 6462 arm_is_el2_enabled(env) &&
967aa94f
RH
6463 !(arm_hcr_el2_eff(env) & HCR_APK)) {
6464 return CP_ACCESS_TRAP_EL2;
6465 }
6466 if (el < 3 &&
6467 arm_feature(env, ARM_FEATURE_EL3) &&
6468 !(env->cp15.scr_el3 & SCR_APK)) {
6469 return CP_ACCESS_TRAP_EL3;
6470 }
6471 return CP_ACCESS_OK;
6472}
6473
6474static const ARMCPRegInfo pauth_reginfo[] = {
6475 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6476 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6477 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6478 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
967aa94f
RH
6479 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6480 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6481 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6482 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
967aa94f
RH
6483 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6484 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6485 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6486 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
967aa94f
RH
6487 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6488 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6489 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6490 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
967aa94f
RH
6491 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6492 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6493 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6494 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
967aa94f
RH
6495 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6496 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6497 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6498 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
967aa94f
RH
6499 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6500 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6501 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6502 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
967aa94f
RH
6503 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6504 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6505 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6506 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
967aa94f
RH
6507 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6508 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6509 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6510 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
967aa94f
RH
6511 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6512 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6513 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 6514 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
967aa94f 6515};
de390645 6516
84940ed8
RC
6517static const ARMCPRegInfo tlbirange_reginfo[] = {
6518 { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
6519 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
6520 .access = PL1_W, .type = ARM_CP_NO_RAW,
6521 .writefn = tlbi_aa64_rvae1is_write },
6522 { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
6523 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
6524 .access = PL1_W, .type = ARM_CP_NO_RAW,
6525 .writefn = tlbi_aa64_rvae1is_write },
6526 { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
6527 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
6528 .access = PL1_W, .type = ARM_CP_NO_RAW,
6529 .writefn = tlbi_aa64_rvae1is_write },
6530 { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
6531 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
6532 .access = PL1_W, .type = ARM_CP_NO_RAW,
6533 .writefn = tlbi_aa64_rvae1is_write },
6534 { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
6535 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
6536 .access = PL1_W, .type = ARM_CP_NO_RAW,
6537 .writefn = tlbi_aa64_rvae1is_write },
6538 { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
6539 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
6540 .access = PL1_W, .type = ARM_CP_NO_RAW,
6541 .writefn = tlbi_aa64_rvae1is_write },
6542 { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
6543 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
6544 .access = PL1_W, .type = ARM_CP_NO_RAW,
6545 .writefn = tlbi_aa64_rvae1is_write },
6546 { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
6547 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
6548 .access = PL1_W, .type = ARM_CP_NO_RAW,
6549 .writefn = tlbi_aa64_rvae1is_write },
6550 { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
6551 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
6552 .access = PL1_W, .type = ARM_CP_NO_RAW,
6553 .writefn = tlbi_aa64_rvae1_write },
6554 { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
6555 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
6556 .access = PL1_W, .type = ARM_CP_NO_RAW,
6557 .writefn = tlbi_aa64_rvae1_write },
6558 { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
6559 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
6560 .access = PL1_W, .type = ARM_CP_NO_RAW,
6561 .writefn = tlbi_aa64_rvae1_write },
6562 { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
6563 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
6564 .access = PL1_W, .type = ARM_CP_NO_RAW,
6565 .writefn = tlbi_aa64_rvae1_write },
6566 { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
6567 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
6568 .access = PL2_W, .type = ARM_CP_NOP },
6569 { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
6570 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
6571 .access = PL2_W, .type = ARM_CP_NOP },
6572 { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
6573 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
696ba377 6574 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
84940ed8
RC
6575 .writefn = tlbi_aa64_rvae2is_write },
6576 { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
6577 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
696ba377 6578 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
84940ed8
RC
6579 .writefn = tlbi_aa64_rvae2is_write },
6580 { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
6581 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
6582 .access = PL2_W, .type = ARM_CP_NOP },
6583 { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
6584 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
6585 .access = PL2_W, .type = ARM_CP_NOP },
6586 { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
6587 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
696ba377 6588 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
84940ed8
RC
6589 .writefn = tlbi_aa64_rvae2is_write },
6590 { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
6591 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
696ba377 6592 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
84940ed8
RC
6593 .writefn = tlbi_aa64_rvae2is_write },
6594 { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
6595 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
696ba377 6596 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
84940ed8
RC
6597 .writefn = tlbi_aa64_rvae2_write },
6598 { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
6599 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
696ba377 6600 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
84940ed8
RC
6601 .writefn = tlbi_aa64_rvae2_write },
6602 { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
6603 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
6604 .access = PL3_W, .type = ARM_CP_NO_RAW,
6605 .writefn = tlbi_aa64_rvae3is_write },
6606 { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
6607 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
6608 .access = PL3_W, .type = ARM_CP_NO_RAW,
6609 .writefn = tlbi_aa64_rvae3is_write },
6610 { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
6611 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
6612 .access = PL3_W, .type = ARM_CP_NO_RAW,
6613 .writefn = tlbi_aa64_rvae3is_write },
6614 { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
6615 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
6616 .access = PL3_W, .type = ARM_CP_NO_RAW,
6617 .writefn = tlbi_aa64_rvae3is_write },
6618 { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
6619 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
6620 .access = PL3_W, .type = ARM_CP_NO_RAW,
6621 .writefn = tlbi_aa64_rvae3_write },
6622 { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
6623 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
6624 .access = PL3_W, .type = ARM_CP_NO_RAW,
6625 .writefn = tlbi_aa64_rvae3_write },
84940ed8
RC
6626};
6627
7113d618
RC
6628static const ARMCPRegInfo tlbios_reginfo[] = {
6629 { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
6630 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
6631 .access = PL1_W, .type = ARM_CP_NO_RAW,
6632 .writefn = tlbi_aa64_vmalle1is_write },
b7469ef9
IH
6633 { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
6634 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
6635 .access = PL1_W, .type = ARM_CP_NO_RAW,
6636 .writefn = tlbi_aa64_vae1is_write },
7113d618
RC
6637 { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
6638 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
6639 .access = PL1_W, .type = ARM_CP_NO_RAW,
6640 .writefn = tlbi_aa64_vmalle1is_write },
b7469ef9
IH
6641 { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
6642 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
6643 .access = PL1_W, .type = ARM_CP_NO_RAW,
6644 .writefn = tlbi_aa64_vae1is_write },
6645 { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
6646 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
6647 .access = PL1_W, .type = ARM_CP_NO_RAW,
6648 .writefn = tlbi_aa64_vae1is_write },
6649 { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
6650 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
6651 .access = PL1_W, .type = ARM_CP_NO_RAW,
6652 .writefn = tlbi_aa64_vae1is_write },
7113d618
RC
6653 { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
6654 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
696ba377 6655 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7113d618 6656 .writefn = tlbi_aa64_alle2is_write },
b7469ef9
IH
6657 { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
6658 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
696ba377 6659 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
b7469ef9 6660 .writefn = tlbi_aa64_vae2is_write },
7113d618
RC
6661 { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
6662 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
6663 .access = PL2_W, .type = ARM_CP_NO_RAW,
6664 .writefn = tlbi_aa64_alle1is_write },
b7469ef9
IH
6665 { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
6666 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
696ba377 6667 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
b7469ef9 6668 .writefn = tlbi_aa64_vae2is_write },
7113d618
RC
6669 { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
6670 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
6671 .access = PL2_W, .type = ARM_CP_NO_RAW,
6672 .writefn = tlbi_aa64_alle1is_write },
6673 { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
6674 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
6675 .access = PL2_W, .type = ARM_CP_NOP },
6676 { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
6677 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
6678 .access = PL2_W, .type = ARM_CP_NOP },
6679 { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
6680 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
6681 .access = PL2_W, .type = ARM_CP_NOP },
6682 { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
6683 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
6684 .access = PL2_W, .type = ARM_CP_NOP },
6685 { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
6686 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
6687 .access = PL3_W, .type = ARM_CP_NO_RAW,
6688 .writefn = tlbi_aa64_alle3is_write },
b7469ef9
IH
6689 { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
6690 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
6691 .access = PL3_W, .type = ARM_CP_NO_RAW,
6692 .writefn = tlbi_aa64_vae3is_write },
6693 { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
6694 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
6695 .access = PL3_W, .type = ARM_CP_NO_RAW,
6696 .writefn = tlbi_aa64_vae3is_write },
7113d618
RC
6697};
6698
de390645
RH
6699static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
6700{
6701 Error *err = NULL;
6702 uint64_t ret;
6703
6704 /* Success sets NZCV = 0000. */
6705 env->NF = env->CF = env->VF = 0, env->ZF = 1;
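    /*
     * Note that QEMU stores Z inverted: the Z flag reads as set exactly
     * when env->ZF == 0, so ZF = 1 here leaves Z clear, matching the
     * NZCV = 0000 success indication.
     */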
6706
6707 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
6708 /*
 6709 * ??? Failed for unknown reasons in the crypto subsystem.
 6710 * The best we can do is log the reason and return the
 6711 * timed-out indication to the guest. We have no reason to
 6712 * expect this failure to be transitory, so the guest may
 6713 * well hang retrying the operation.
6714 */
6715 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
6716 ri->name, error_get_pretty(err));
6717 error_free(err);
6718
 6719 env->ZF = 0; /* NZCV = 0100 */
6720 return 0;
6721 }
6722 return ret;
6723}
6724
6725/* We do not support re-seeding, so the two registers operate the same. */
6726static const ARMCPRegInfo rndr_reginfo[] = {
6727 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
6728 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6729 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
6730 .access = PL0_R, .readfn = rndr_readfn },
6731 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
6732 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6733 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
6734 .access = PL0_R, .readfn = rndr_readfn },
de390645 6735};
0d57b499
BM
6736
6737#ifndef CONFIG_USER_ONLY
6738static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
6739 uint64_t value)
6740{
6741 ARMCPU *cpu = env_archcpu(env);
6742 /* CTR_EL0 System register -> DminLine, bits [19:16] */
6743 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
6744 uint64_t vaddr_in = (uint64_t) value;
6745 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
6746 void *haddr;
6747 int mem_idx = cpu_mmu_index(env, false);
6748
 6749 /* This won't cross page boundaries. */
6750 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
6751 if (haddr) {
6752
6753 ram_addr_t offset;
6754 MemoryRegion *mr;
6755
6756 /* RCU lock is already being held */
6757 mr = memory_region_from_host(haddr, &offset);
6758
6759 if (mr) {
4dfe59d1 6760 memory_region_writeback(mr, offset, dline_size);
0d57b499
BM
6761 }
6762 }
6763}
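/*
 * CTR_EL0.DminLine is log2 of the line size in words, so e.g.
 * DminLine = 4 yields dline_size = 4 << 4 = 64 bytes, and vaddr is
 * the input address rounded down to that 64-byte line boundary.
 */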
6764
6765static const ARMCPRegInfo dcpop_reg[] = {
6766 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
6767 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
6768 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
1bed4d2e 6769 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
0d57b499
BM
6770};
6771
6772static const ARMCPRegInfo dcpodp_reg[] = {
6773 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
6774 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
6775 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
1bed4d2e 6776 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
0d57b499
BM
6777};
6778#endif /*CONFIG_USER_ONLY*/
6779
4b779ceb
RH
6780static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
6781 bool isread)
6782{
6783 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
6784 return CP_ACCESS_TRAP_EL2;
6785 }
6786
6787 return CP_ACCESS_OK;
6788}
6789
6790static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
6791 bool isread)
6792{
6793 int el = arm_current_el(env);
6794
0da067f2 6795 if (el < 2 && arm_is_el2_enabled(env)) {
4301acd7
RH
6796 uint64_t hcr = arm_hcr_el2_eff(env);
6797 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
6798 return CP_ACCESS_TRAP_EL2;
6799 }
4b779ceb
RH
6800 }
6801 if (el < 3 &&
6802 arm_feature(env, ARM_FEATURE_EL3) &&
6803 !(env->cp15.scr_el3 & SCR_ATA)) {
6804 return CP_ACCESS_TRAP_EL3;
6805 }
6806 return CP_ACCESS_OK;
6807}
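/*
 * Put another way: when EL2 is enabled, HCR_EL2.ATA == 0 traps these
 * accesses from EL0/EL1 to EL2 unless E2H and TGE are both set (EL2
 * is then the host and its own MTE controls govern), and SCR_EL3.ATA
 * plays the same gatekeeping role for EL3.
 */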
6808
6809static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
6810{
6811 return env->pstate & PSTATE_TCO;
6812}
6813
6814static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
6815{
6816 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
6817}
6818
6819static const ARMCPRegInfo mte_reginfo[] = {
6820 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
6821 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
6822 .access = PL1_RW, .accessfn = access_mte,
6823 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
6824 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
6825 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
6826 .access = PL1_RW, .accessfn = access_mte,
6827 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
6828 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
6829 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
6830 .access = PL2_RW, .accessfn = access_mte,
6831 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
6832 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
6833 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
6834 .access = PL3_RW,
6835 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
6836 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
6837 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
6838 .access = PL1_RW, .accessfn = access_mte,
6839 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
6840 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
6841 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
6842 .access = PL1_RW, .accessfn = access_mte,
6843 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
6844 { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
6845 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
6846 .access = PL1_R, .accessfn = access_aa64_tid5,
6847 .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
6848 { .name = "TCO", .state = ARM_CP_STATE_AA64,
6849 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
6850 .type = ARM_CP_NO_RAW,
6851 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
5463df16
RH
6852 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
6853 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
6854 .type = ARM_CP_NOP, .access = PL1_W,
6855 .accessfn = aa64_cacheop_poc_access },
6856 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
6857 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
6858 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6859 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
6860 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
6861 .type = ARM_CP_NOP, .access = PL1_W,
6862 .accessfn = aa64_cacheop_poc_access },
6863 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
6864 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
6865 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6866 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
6867 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
6868 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6869 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
6870 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
6871 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6872 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
6873 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
6874 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6875 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
6876 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
6877 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
4b779ceb
RH
6878};
6879
6880static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
6881 { .name = "TCO", .state = ARM_CP_STATE_AA64,
6882 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
6883 .type = ARM_CP_CONST, .access = PL0_RW, },
4b779ceb 6884};
5463df16
RH
6885
6886static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
6887 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
6888 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
6889 .type = ARM_CP_NOP, .access = PL0_W,
6890 .accessfn = aa64_cacheop_poc_access },
6891 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
6892 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
6893 .type = ARM_CP_NOP, .access = PL0_W,
6894 .accessfn = aa64_cacheop_poc_access },
6895 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
6896 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
6897 .type = ARM_CP_NOP, .access = PL0_W,
6898 .accessfn = aa64_cacheop_poc_access },
6899 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
6900 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
6901 .type = ARM_CP_NOP, .access = PL0_W,
6902 .accessfn = aa64_cacheop_poc_access },
6903 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
6904 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
6905 .type = ARM_CP_NOP, .access = PL0_W,
6906 .accessfn = aa64_cacheop_poc_access },
6907 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
6908 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
6909 .type = ARM_CP_NOP, .access = PL0_W,
6910 .accessfn = aa64_cacheop_poc_access },
6911 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
6912 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
6913 .type = ARM_CP_NOP, .access = PL0_W,
6914 .accessfn = aa64_cacheop_poc_access },
6915 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
6916 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
6917 .type = ARM_CP_NOP, .access = PL0_W,
6918 .accessfn = aa64_cacheop_poc_access },
eb821168
RH
6919 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
6920 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
6921 .access = PL0_W, .type = ARM_CP_DC_GVA,
6922#ifndef CONFIG_USER_ONLY
6923 /* Avoid overhead of an access check that always passes in user-mode */
6924 .accessfn = aa64_zva_access,
6925#endif
6926 },
6927 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
6928 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
6929 .access = PL0_W, .type = ARM_CP_DC_GZVA,
6930#ifndef CONFIG_USER_ONLY
6931 /* Avoid overhead of an access check that always passes in user-mode */
6932 .accessfn = aa64_zva_access,
6933#endif
6934 },
5463df16
RH
6935};
6936
7cb1e618
RH
6937static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
6938 bool isread)
6939{
6940 uint64_t hcr = arm_hcr_el2_eff(env);
6941 int el = arm_current_el(env);
6942
6943 if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
6944 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
6945 if (hcr & HCR_TGE) {
6946 return CP_ACCESS_TRAP_EL2;
6947 }
6948 return CP_ACCESS_TRAP;
6949 }
6950 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
6951 return CP_ACCESS_TRAP_EL2;
6952 }
6953 if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
6954 return CP_ACCESS_TRAP_EL2;
6955 }
6956 if (el < 3
6957 && arm_feature(env, ARM_FEATURE_EL3)
6958 && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
6959 return CP_ACCESS_TRAP_EL3;
6960 }
6961 return CP_ACCESS_OK;
6962}
6963
6964static const ARMCPRegInfo scxtnum_reginfo[] = {
6965 { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
6966 .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
6967 .access = PL0_RW, .accessfn = access_scxtnum,
6968 .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
6969 { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
6970 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
6971 .access = PL1_RW, .accessfn = access_scxtnum,
6972 .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
6973 { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
6974 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
6975 .access = PL2_RW, .accessfn = access_scxtnum,
6976 .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
6977 { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
6978 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
6979 .access = PL3_RW,
6980 .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
6981};
6982#endif /* TARGET_AARCH64 */
967aa94f 6983
cb570bd3
RH
6984static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
6985 bool isread)
6986{
6987 int el = arm_current_el(env);
6988
6989 if (el == 0) {
6990 uint64_t sctlr = arm_sctlr(env, el);
6991 if (!(sctlr & SCTLR_EnRCTX)) {
6992 return CP_ACCESS_TRAP;
6993 }
6994 } else if (el == 1) {
6995 uint64_t hcr = arm_hcr_el2_eff(env);
6996 if (hcr & HCR_NV) {
6997 return CP_ACCESS_TRAP_EL2;
6998 }
6999 }
7000 return CP_ACCESS_OK;
7001}
7002
7003static const ARMCPRegInfo predinv_reginfo[] = {
7004 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7005 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
7006 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7007 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7008 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
7009 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7010 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7011 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
7012 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7013 /*
7014 * Note the AArch32 opcodes have a different OPC1.
7015 */
7016 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7017 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
7018 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7019 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7020 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
7021 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7022 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7023 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
7024 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
cb570bd3
RH
7025};
7026
957e6155
PM
7027static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
7028{
7029 /* Read the high 32 bits of the current CCSIDR */
7030 return extract64(ccsidr_read(env, ri), 32, 32);
7031}
7032
7033static const ARMCPRegInfo ccsidr2_reginfo[] = {
7034 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
7035 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
7036 .access = PL1_R,
7037 .accessfn = access_aa64_tid2,
7038 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
957e6155
PM
7039};
7040
6a4ef4e5
MZ
7041static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7042 bool isread)
7043{
7044 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
7045 return CP_ACCESS_TRAP_EL2;
7046 }
7047
7048 return CP_ACCESS_OK;
7049}
7050
7051static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7052 bool isread)
7053{
7054 if (arm_feature(env, ARM_FEATURE_V8)) {
7055 return access_aa64_tid3(env, ri, isread);
7056 }
7057
7058 return CP_ACCESS_OK;
7059}
7060
f96f3d5f
MZ
7061static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
7062 bool isread)
7063{
7064 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
7065 return CP_ACCESS_TRAP_EL2;
7066 }
7067
7068 return CP_ACCESS_OK;
7069}
7070
8e228c9e
PM
7071static CPAccessResult access_joscr_jmcr(CPUARMState *env,
7072 const ARMCPRegInfo *ri, bool isread)
7073{
7074 /*
7075 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
7076 * in v7A, not in v8A.
7077 */
7078 if (!arm_feature(env, ARM_FEATURE_V8) &&
7079 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
7080 (env->cp15.hstr_el2 & HSTR_TJDBX)) {
7081 return CP_ACCESS_TRAP_EL2;
7082 }
7083 return CP_ACCESS_OK;
7084}
7085
f96f3d5f
MZ
7086static const ARMCPRegInfo jazelle_regs[] = {
7087 { .name = "JIDR",
7088 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
7089 .access = PL1_R, .accessfn = access_jazelle,
7090 .type = ARM_CP_CONST, .resetvalue = 0 },
7091 { .name = "JOSCR",
7092 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
8e228c9e 7093 .accessfn = access_joscr_jmcr,
f96f3d5f
MZ
7094 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7095 { .name = "JMCR",
7096 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
8e228c9e 7097 .accessfn = access_joscr_jmcr,
f96f3d5f 7098 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
f96f3d5f
MZ
7099};
7100
52d18727
RH
7101static const ARMCPRegInfo contextidr_el2 = {
7102 .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
7103 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
7104 .access = PL2_RW,
7105 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
7106};
7107
e2a1a461 7108static const ARMCPRegInfo vhe_reginfo[] = {
ed30da8e
RH
7109 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7110 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
7111 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
7112 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
8c94b071
RH
7113#ifndef CONFIG_USER_ONLY
7114 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
7115 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
7116 .fieldoffset =
7117 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
7118 .type = ARM_CP_IO, .access = PL2_RW,
7119 .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
7120 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
7121 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
7122 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
7123 .resetfn = gt_hv_timer_reset,
7124 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
7125 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
7126 .type = ARM_CP_IO,
7127 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
7128 .access = PL2_RW,
7129 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
7130 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
bb5972e4
RH
7131 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7132 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
7133 .type = ARM_CP_IO | ARM_CP_ALIAS,
7134 .access = PL2_RW, .accessfn = e2h_access,
7135 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
7136 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
7137 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7138 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
7139 .type = ARM_CP_IO | ARM_CP_ALIAS,
7140 .access = PL2_RW, .accessfn = e2h_access,
7141 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
7142 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
7143 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7144 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
7145 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7146 .access = PL2_RW, .accessfn = e2h_access,
7147 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
7148 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7149 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
7150 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7151 .access = PL2_RW, .accessfn = e2h_access,
7152 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
7153 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7154 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
7155 .type = ARM_CP_IO | ARM_CP_ALIAS,
7156 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
7157 .access = PL2_RW, .accessfn = e2h_access,
7158 .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
7159 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7160 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
7161 .type = ARM_CP_IO | ARM_CP_ALIAS,
7162 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
7163 .access = PL2_RW, .accessfn = e2h_access,
7164 .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
8c94b071 7165#endif
e2a1a461
RH
7166};
7167
04b07d29
RH
7168#ifndef CONFIG_USER_ONLY
7169static const ARMCPRegInfo ats1e1_reginfo[] = {
7170 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
7171 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7172 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7173 .writefn = ats_write64 },
7174 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
7175 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7176 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7177 .writefn = ats_write64 },
04b07d29
RH
7178};
7179
7180static const ARMCPRegInfo ats1cp_reginfo[] = {
7181 { .name = "ATS1CPRP",
7182 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7183 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7184 .writefn = ats_write },
7185 { .name = "ATS1CPWP",
7186 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7187 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7188 .writefn = ats_write },
04b07d29
RH
7189};
7190#endif
7191
f6287c24
PM
7192/*
7193 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
7194 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
7195 * is non-zero, which is never the case for ARMv7, optional in
7196 * ARMv8, and mandatory for ARMv8.2 and up.
7197 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
7198 * implementation is RAZ/WI we can ignore this detail, as we
7199 * do for ACTLR.
7200 */
7201static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
7202 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7203 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
99602377
RH
7204 .access = PL1_RW, .accessfn = access_tacr,
7205 .type = ARM_CP_CONST, .resetvalue = 0 },
f6287c24
PM
7206 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7207 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
7208 .access = PL2_RW, .type = ARM_CP_CONST,
7209 .resetvalue = 0 },
f6287c24
PM
7210};
7211
2ceb98c0
PM
7212void register_cp_regs_for_features(ARMCPU *cpu)
7213{
7214 /* Register all the coprocessor registers based on feature bits */
7215 CPUARMState *env = &cpu->env;
7216 if (arm_feature(env, ARM_FEATURE_M)) {
7217 /* M profile has no coprocessor registers */
7218 return;
7219 }
7220
e9aa6c21 7221 define_arm_cp_regs(cpu, cp_reginfo);
9449fdf6
PM
7222 if (!arm_feature(env, ARM_FEATURE_V8)) {
7223 /* Must go early as it is full of wildcards that may be
7224 * overridden by later definitions.
7225 */
7226 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
7227 }
7228
7d57f408 7229 if (arm_feature(env, ARM_FEATURE_V6)) {
8515a092
PM
7230 /* The ID registers all have impdef reset values */
7231 ARMCPRegInfo v6_idregs[] = {
0ff644a7
PM
7232 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
7233 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
7234 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7235 .accessfn = access_aa32_tid3,
8a130a7b 7236 .resetvalue = cpu->isar.id_pfr0 },
96a8b92e
PM
7237 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
7238 * the value of the GIC field until after we define these regs;
7239 * see the readfn sketch after this array. */
0ff644a7
PM
7240 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
7241 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
96a8b92e 7242 .access = PL1_R, .type = ARM_CP_NO_RAW,
6a4ef4e5 7243 .accessfn = access_aa32_tid3,
96a8b92e
PM
7244 .readfn = id_pfr1_read,
7245 .writefn = arm_cp_write_ignore },
0ff644a7
PM
7246 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
7247 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
7248 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7249 .accessfn = access_aa32_tid3,
a6179538 7250 .resetvalue = cpu->isar.id_dfr0 },
0ff644a7
PM
7251 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
7252 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
7253 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7254 .accessfn = access_aa32_tid3,
8515a092 7255 .resetvalue = cpu->id_afr0 },
0ff644a7
PM
7256 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
7257 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
7258 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7259 .accessfn = access_aa32_tid3,
10054016 7260 .resetvalue = cpu->isar.id_mmfr0 },
0ff644a7
PM
7261 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
7262 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
7263 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7264 .accessfn = access_aa32_tid3,
10054016 7265 .resetvalue = cpu->isar.id_mmfr1 },
0ff644a7
PM
7266 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
7267 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
7268 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7269 .accessfn = access_aa32_tid3,
10054016 7270 .resetvalue = cpu->isar.id_mmfr2 },
0ff644a7
PM
7271 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
7272 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
7273 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7274 .accessfn = access_aa32_tid3,
10054016 7275 .resetvalue = cpu->isar.id_mmfr3 },
0ff644a7
PM
7276 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
7277 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
7278 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7279 .accessfn = access_aa32_tid3,
47576b94 7280 .resetvalue = cpu->isar.id_isar0 },
0ff644a7
PM
7281 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
7282 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
7283 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7284 .accessfn = access_aa32_tid3,
47576b94 7285 .resetvalue = cpu->isar.id_isar1 },
0ff644a7
PM
7286 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
7287 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
7288 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7289 .accessfn = access_aa32_tid3,
47576b94 7290 .resetvalue = cpu->isar.id_isar2 },
0ff644a7
PM
7291 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
7292 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
7293 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7294 .accessfn = access_aa32_tid3,
47576b94 7295 .resetvalue = cpu->isar.id_isar3 },
0ff644a7
PM
7296 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
7297 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
7298 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7299 .accessfn = access_aa32_tid3,
47576b94 7300 .resetvalue = cpu->isar.id_isar4 },
0ff644a7
PM
7301 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
7302 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
7303 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7304 .accessfn = access_aa32_tid3,
47576b94 7305 .resetvalue = cpu->isar.id_isar5 },
e20d84c1
PM
7306 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
7307 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
7308 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7309 .accessfn = access_aa32_tid3,
10054016 7310 .resetvalue = cpu->isar.id_mmfr4 },
802abf40 7311 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
e20d84c1
PM
7312 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
7313 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7314 .accessfn = access_aa32_tid3,
47576b94 7315 .resetvalue = cpu->isar.id_isar6 },
8515a092
PM
7316 };
7317 define_arm_cp_regs(cpu, v6_idregs);
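        /*
         * The ID_PFR1 entry above uses a readfn so the GIC field can be
         * filled in at read time. A sketch of that mechanism, assuming
         * the gicv3state pointer and the ID_PFR1.GIC field at bits
         * [31:28]:
         *
         *     static uint64_t id_pfr1_read(CPUARMState *env,
         *                                  const ARMCPRegInfo *ri)
         *     {
         *         ARMCPU *cpu = env_archcpu(env);
         *         uint64_t pfr1 = cpu->isar.id_pfr1;
         *
         *         if (env->gicv3state) {
         *             pfr1 |= 1 << 28;   (advertise a system GIC)
         *         }
         *         return pfr1;
         *     }
         */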
7d57f408
PM
7318 define_arm_cp_regs(cpu, v6_cp_reginfo);
7319 } else {
7320 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
7321 }
4d31c596
PM
7322 if (arm_feature(env, ARM_FEATURE_V6K)) {
7323 define_arm_cp_regs(cpu, v6k_cp_reginfo);
7324 }
5e5cf9e3 7325 if (arm_feature(env, ARM_FEATURE_V7MP) &&
452a0955 7326 !arm_feature(env, ARM_FEATURE_PMSA)) {
995939a6
PM
7327 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
7328 }
327dd510
AL
7329 if (arm_feature(env, ARM_FEATURE_V7VE)) {
7330 define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
7331 }
e9aa6c21 7332 if (arm_feature(env, ARM_FEATURE_V7)) {
776d4e5c 7333 ARMCPRegInfo clidr = {
7da845b0
PM
7334 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7335 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
630fcd4d
MZ
7336 .access = PL1_R, .type = ARM_CP_CONST,
7337 .accessfn = access_aa64_tid2,
7338 .resetvalue = cpu->clidr
776d4e5c 7339 };
776d4e5c 7340 define_one_arm_cp_reg(cpu, &clidr);
e9aa6c21 7341 define_arm_cp_regs(cpu, v7_cp_reginfo);
50300698 7342 define_debug_regs(cpu);
24183fb6 7343 define_pmu_regs(cpu);
7d57f408
PM
7344 } else {
7345 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
e9aa6c21 7346 }
b0d2b7d0 7347 if (arm_feature(env, ARM_FEATURE_V8)) {
e20d84c1
PM
7348 /* AArch64 ID registers, which all have impdef reset values.
7349 * Note that within the ID register ranges the unused slots
7350 * must all be RAZ, not UNDEF; future architecture versions may
7351 * define new registers here.
7352 */
e60cef86 7353 ARMCPRegInfo v8_idregs[] = {
976b99b6
AB
7354 /*
7355 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7356 * emulation because we don't know the right value for the
7357 * GIC field until after we define these regs.
96a8b92e 7358 */
e60cef86
PM
7359 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
7360 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
976b99b6
AB
7361 .access = PL1_R,
7362#ifdef CONFIG_USER_ONLY
7363 .type = ARM_CP_CONST,
7364 .resetvalue = cpu->isar.id_aa64pfr0
7365#else
7366 .type = ARM_CP_NO_RAW,
6a4ef4e5 7367 .accessfn = access_aa64_tid3,
96a8b92e 7368 .readfn = id_aa64pfr0_read,
976b99b6
AB
7369 .writefn = arm_cp_write_ignore
7370#endif
7371 },
e60cef86
PM
7372 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7373 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7374 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7375 .accessfn = access_aa64_tid3,
47576b94 7376 .resetvalue = cpu->isar.id_aa64pfr1 },
e20d84c1
PM
7377 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7378 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7379 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7380 .accessfn = access_aa64_tid3,
e20d84c1
PM
7381 .resetvalue = 0 },
7382 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7383 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7384 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7385 .accessfn = access_aa64_tid3,
e20d84c1 7386 .resetvalue = 0 },
9516d772 7387 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
e20d84c1
PM
7388 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7389 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7390 .accessfn = access_aa64_tid3,
2dc10fa2 7391 .resetvalue = cpu->isar.id_aa64zfr0 },
414c54d5 7392 { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
e20d84c1
PM
7393 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7394 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7395 .accessfn = access_aa64_tid3,
414c54d5 7396 .resetvalue = cpu->isar.id_aa64smfr0 },
e20d84c1
PM
7397 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7398 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7399 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7400 .accessfn = access_aa64_tid3,
e20d84c1
PM
7401 .resetvalue = 0 },
7402 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7403 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7404 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7405 .accessfn = access_aa64_tid3,
e20d84c1 7406 .resetvalue = 0 },
e60cef86
PM
7407 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7408 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7409 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7410 .accessfn = access_aa64_tid3,
2a609df8 7411 .resetvalue = cpu->isar.id_aa64dfr0 },
e60cef86
PM
7412 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7413 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7414 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7415 .accessfn = access_aa64_tid3,
2a609df8 7416 .resetvalue = cpu->isar.id_aa64dfr1 },
e20d84c1
PM
7417 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7418 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7419 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7420 .accessfn = access_aa64_tid3,
e20d84c1
PM
7421 .resetvalue = 0 },
7422 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7423 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7424 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7425 .accessfn = access_aa64_tid3,
e20d84c1 7426 .resetvalue = 0 },
e60cef86
PM
7427 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7428 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7429 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7430 .accessfn = access_aa64_tid3,
e60cef86
PM
7431 .resetvalue = cpu->id_aa64afr0 },
7432 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7433 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7434 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7435 .accessfn = access_aa64_tid3,
e60cef86 7436 .resetvalue = cpu->id_aa64afr1 },
e20d84c1
PM
7437 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7438 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7439 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7440 .accessfn = access_aa64_tid3,
e20d84c1
PM
7441 .resetvalue = 0 },
7442 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7443 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7444 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7445 .accessfn = access_aa64_tid3,
e20d84c1 7446 .resetvalue = 0 },
e60cef86
PM
7447 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7448 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7449 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7450 .accessfn = access_aa64_tid3,
47576b94 7451 .resetvalue = cpu->isar.id_aa64isar0 },
e60cef86
PM
7452 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7453 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7454 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7455 .accessfn = access_aa64_tid3,
47576b94 7456 .resetvalue = cpu->isar.id_aa64isar1 },
e20d84c1
PM
7457 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7458 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7459 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7460 .accessfn = access_aa64_tid3,
e20d84c1
PM
7461 .resetvalue = 0 },
7462 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7463 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7464 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7465 .accessfn = access_aa64_tid3,
e20d84c1
PM
7466 .resetvalue = 0 },
7467 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7468 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7469 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7470 .accessfn = access_aa64_tid3,
e20d84c1
PM
7471 .resetvalue = 0 },
7472 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7473 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7474 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7475 .accessfn = access_aa64_tid3,
e20d84c1
PM
7476 .resetvalue = 0 },
7477 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7478 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7479 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7480 .accessfn = access_aa64_tid3,
e20d84c1
PM
7481 .resetvalue = 0 },
7482 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7483 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7484 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7485 .accessfn = access_aa64_tid3,
e20d84c1 7486 .resetvalue = 0 },
e60cef86
PM
7487 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7488 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7489 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7490 .accessfn = access_aa64_tid3,
3dc91ddb 7491 .resetvalue = cpu->isar.id_aa64mmfr0 },
e60cef86
PM
7492 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7493 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7494 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7495 .accessfn = access_aa64_tid3,
3dc91ddb 7496 .resetvalue = cpu->isar.id_aa64mmfr1 },
64761e10 7497 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
e20d84c1
PM
7498 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7499 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7500 .accessfn = access_aa64_tid3,
64761e10 7501 .resetvalue = cpu->isar.id_aa64mmfr2 },
e20d84c1
PM
7502 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7503 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7504 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7505 .accessfn = access_aa64_tid3,
e20d84c1
PM
7506 .resetvalue = 0 },
7507 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7508 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7509 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7510 .accessfn = access_aa64_tid3,
e20d84c1
PM
7511 .resetvalue = 0 },
7512 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7513 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7514 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7515 .accessfn = access_aa64_tid3,
e20d84c1
PM
7516 .resetvalue = 0 },
7517 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7518 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7519 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7520 .accessfn = access_aa64_tid3,
e20d84c1
PM
7521 .resetvalue = 0 },
7522 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7523 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7524 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7525 .accessfn = access_aa64_tid3,
e20d84c1 7526 .resetvalue = 0 },
a50c0f51
PM
7527 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7528 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7529 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7530 .accessfn = access_aa64_tid3,
47576b94 7531 .resetvalue = cpu->isar.mvfr0 },
a50c0f51
PM
7532 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7533 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7534 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7535 .accessfn = access_aa64_tid3,
47576b94 7536 .resetvalue = cpu->isar.mvfr1 },
a50c0f51
PM
7537 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7538 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7539 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7540 .accessfn = access_aa64_tid3,
47576b94 7541 .resetvalue = cpu->isar.mvfr2 },
e20d84c1
PM
7542 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7543 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7544 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7545 .accessfn = access_aa64_tid3,
e20d84c1 7546 .resetvalue = 0 },
1d51bc96 7547 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
e20d84c1
PM
7548 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7549 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7550 .accessfn = access_aa64_tid3,
1d51bc96 7551 .resetvalue = cpu->isar.id_pfr2 },
e20d84c1
PM
7552 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7553 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7554 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7555 .accessfn = access_aa64_tid3,
e20d84c1
PM
7556 .resetvalue = 0 },
7557 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7558 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7559 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7560 .accessfn = access_aa64_tid3,
e20d84c1
PM
7561 .resetvalue = 0 },
7562 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7563 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7564 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 7565 .accessfn = access_aa64_tid3,
e20d84c1 7566 .resetvalue = 0 },
4054bfa9
AF
7567 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7568 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7569 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
cad86737 7570 .resetvalue = extract64(cpu->pmceid0, 0, 32) },
4054bfa9
AF
7571 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7572 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7573 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7574 .resetvalue = cpu->pmceid0 },
7575 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7576 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7577 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
cad86737 7578 .resetvalue = extract64(cpu->pmceid1, 0, 32) },
4054bfa9
AF
7579 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7580 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7581 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7582 .resetvalue = cpu->pmceid1 },
e60cef86 7583 };
6c5c0fec 7584#ifdef CONFIG_USER_ONLY
10b0220e 7585 static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6c5c0fec
AB
7586 { .name = "ID_AA64PFR0_EL1",
7587 .exported_bits = 0x000f000f00ff0000,
7588 .fixed_bits = 0x0000000000000011 },
7589 { .name = "ID_AA64PFR1_EL1",
7590 .exported_bits = 0x00000000000000f0 },
d040242e
AB
7591 { .name = "ID_AA64PFR*_EL1_RESERVED",
7592 .is_glob = true },
6c5c0fec
AB
7593 { .name = "ID_AA64ZFR0_EL1" },
7594 { .name = "ID_AA64MMFR0_EL1",
7595 .fixed_bits = 0x00000000ff000000 },
7596 { .name = "ID_AA64MMFR1_EL1" },
d040242e
AB
7597 { .name = "ID_AA64MMFR*_EL1_RESERVED",
7598 .is_glob = true },
6c5c0fec
AB
7599 { .name = "ID_AA64DFR0_EL1",
7600 .fixed_bits = 0x0000000000000006 },
7601 { .name = "ID_AA64DFR1_EL1" },
d040242e
AB
7602 { .name = "ID_AA64DFR*_EL1_RESERVED",
7603 .is_glob = true },
7604 { .name = "ID_AA64AFR*",
7605 .is_glob = true },
6c5c0fec
AB
7606 { .name = "ID_AA64ISAR0_EL1",
7607 .exported_bits = 0x00fffffff0fffff0 },
7608 { .name = "ID_AA64ISAR1_EL1",
7609 .exported_bits = 0x000000f0ffffffff },
d040242e
AB
7610 { .name = "ID_AA64ISAR*_EL1_RESERVED",
7611 .is_glob = true },
6c5c0fec
AB
7612 };
7613 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
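        /*
         * A note on semantics (a sketch; see the modify_arm_cp_regs()
         * implementation later in this file): each exact-name match
         * above becomes a PL0-readable ARM_CP_CONST whose resetvalue
         * keeps only exported_bits and then ORs in fixed_bits, while
         * glob matches (is_glob) are forced to RAZ. So under
         * CONFIG_USER_ONLY, ID_AA64PFR0_EL1 always advertises EL0/EL1
         * as implemented via its fixed_bits of 0x11.
         */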
7614#endif
be8e8128
GB
7615 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7616 if (!arm_feature(env, ARM_FEATURE_EL3) &&
7617 !arm_feature(env, ARM_FEATURE_EL2)) {
7618 ARMCPRegInfo rvbar = {
7619 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
7620 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4a7319b7
EI
7621 .access = PL1_R,
7622 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
be8e8128
GB
7623 };
7624 define_one_arm_cp_reg(cpu, &rvbar);
7625 }
e60cef86 7626 define_arm_cp_regs(cpu, v8_idregs);
b0d2b7d0
PM
7627 define_arm_cp_regs(cpu, v8_cp_reginfo);
7628 }
99a90811
RH
7629
7630 /*
7631 * Register the base EL2 cpregs.
7632 * Pre-v8, these registers are implemented only as part of the
7633 * Virtualization Extensions (EL2 present). Beginning with v8,
7634 * if EL2 is missing but EL3 is enabled, these mostly become
7635 * RES0 from EL3, with some specific exceptions.
7636 */
7637 if (arm_feature(env, ARM_FEATURE_EL2)
7638 || (arm_feature(env, ARM_FEATURE_EL3)
7639 && arm_feature(env, ARM_FEATURE_V8))) {
f0d574d6 7640 uint64_t vmpidr_def = mpidr_read_val(env);
731de9e6
EI
7641 ARMCPRegInfo vpidr_regs[] = {
7642 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
7643 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7644 .access = PL2_RW, .accessfn = access_el3_aa32ns,
696ba377
RH
7645 .resetvalue = cpu->midr,
7646 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
36476562 7647 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
731de9e6
EI
7648 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
7649 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7650 .access = PL2_RW, .resetvalue = cpu->midr,
696ba377 7651 .type = ARM_CP_EL3_NO_EL2_C_NZ,
731de9e6 7652 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
f0d574d6
EI
7653 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
7654 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7655 .access = PL2_RW, .accessfn = access_el3_aa32ns,
696ba377
RH
7656 .resetvalue = vmpidr_def,
7657 .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
36476562 7658 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
f0d574d6
EI
7659 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
7660 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
696ba377
RH
7661 .access = PL2_RW, .resetvalue = vmpidr_def,
7662 .type = ARM_CP_EL3_NO_EL2_C_NZ,
f0d574d6 7663 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
731de9e6 7664 };
24526bb9
PM
7665 /*
7666 * The only field of MDCR_EL2 that has a defined architectural reset
7667 * value is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N.
7668 */
7669 ARMCPRegInfo mdcr_el2 = {
7670 .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
7671 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
7672 .access = PL2_RW, .resetvalue = pmu_num_counters(env),
7673 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
7674 };
7675 define_one_arm_cp_reg(cpu, &mdcr_el2);
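        /*
         * For context: pmu_num_counters() yields PMCR_EL0.N. A sketch of
         * that helper, assuming it extracts the N field from the stored
         * PMCR value with the PMCRN_* constants defined earlier in this
         * file:
         *
         *     static inline uint32_t pmu_num_counters(CPUARMState *env)
         *     {
         *         return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
         *     }
         */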
731de9e6 7676 define_arm_cp_regs(cpu, vpidr_regs);
4771cd01 7677 define_arm_cp_regs(cpu, el2_cp_reginfo);
ce4afed8
PM
7678 if (arm_feature(env, ARM_FEATURE_V8)) {
7679 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
7680 }
e9152ee9
RDC
7681 if (cpu_isar_feature(aa64_sel2, cpu)) {
7682 define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
7683 }
be8e8128
GB
7684 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
7685 if (!arm_feature(env, ARM_FEATURE_EL3)) {
7686 ARMCPRegInfo rvbar = {
7687 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
7688 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
4a7319b7
EI
7689 .access = PL2_R,
7690 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
be8e8128
GB
7691 };
7692 define_one_arm_cp_reg(cpu, &rvbar);
7693 }
3b685ba7 7694 }
99a90811
RH
7695
7696 /* Register the base EL3 cpregs. */
81547d66 7697 if (arm_feature(env, ARM_FEATURE_EL3)) {
0f1a3b24 7698 define_arm_cp_regs(cpu, el3_cp_reginfo);
e24fdd23
PM
7699 ARMCPRegInfo el3_regs[] = {
7700 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
7701 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
4a7319b7
EI
7702 .access = PL3_R,
7703 .fieldoffset = offsetof(CPUARMState, cp15.rvbar),
7704 },
e24fdd23
PM
7705 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
7706 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
7707 .access = PL3_RW,
7708 .raw_writefn = raw_write, .writefn = sctlr_write,
7709 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
7710 .resetvalue = cpu->reset_sctlr },
be8e8128 7711 };
e24fdd23
PM
7712
7713 define_arm_cp_regs(cpu, el3_regs);
81547d66 7714 }
2f027fc5
PM
7715 /* The behaviour of NSACR varies enough between configurations that
7716 * we don't try to describe it in a single reginfo:
7717 * if EL3 is 64 bit, accesses from S EL1 trap to EL3 and it reads
7718 * as constant 0xc00 from NS EL1 and NS EL2;
7719 * if EL3 is 32 bit, it is RW at EL3 and RO at NS EL1 and NS EL2;
7720 * if v7 without EL3, the register doesn't exist;
7721 * if v8 without EL3, it reads as constant 0xc00 from NS EL1 and NS EL2.
7722 */
7723 if (arm_feature(env, ARM_FEATURE_EL3)) {
7724 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
10b0220e 7725 static const ARMCPRegInfo nsacr = {
2f027fc5
PM
7726 .name = "NSACR", .type = ARM_CP_CONST,
7727 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7728 .access = PL1_RW, .accessfn = nsacr_access,
7729 .resetvalue = 0xc00
7730 };
7731 define_one_arm_cp_reg(cpu, &nsacr);
7732 } else {
10b0220e 7733 static const ARMCPRegInfo nsacr = {
2f027fc5
PM
7734 .name = "NSACR",
7735 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7736 .access = PL3_RW | PL1_R,
7737 .resetvalue = 0,
7738 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
7739 };
7740 define_one_arm_cp_reg(cpu, &nsacr);
7741 }
7742 } else {
7743 if (arm_feature(env, ARM_FEATURE_V8)) {
10b0220e 7744 static const ARMCPRegInfo nsacr = {
2f027fc5
PM
7745 .name = "NSACR", .type = ARM_CP_CONST,
7746 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7747 .access = PL1_R,
7748 .resetvalue = 0xc00
7749 };
7750 define_one_arm_cp_reg(cpu, &nsacr);
7751 }
7752 }
7753
452a0955 7754 if (arm_feature(env, ARM_FEATURE_PMSA)) {
6cb0b013
PC
7755 if (arm_feature(env, ARM_FEATURE_V6)) {
7756 /* PMSAv6 not implemented */
7757 assert(arm_feature(env, ARM_FEATURE_V7));
7758 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
7759 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
7760 } else {
7761 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
7762 }
18032bec 7763 } else {
8e5d75c9 7764 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
18032bec 7765 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
4036b7d1
PM
7766 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
7767 if (cpu_isar_feature(aa32_hpd, cpu)) {
ab638a32
RH
7768 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
7769 }
18032bec 7770 }
c326b979
PM
7771 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
7772 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
7773 }
6cc7a3ae
PM
7774 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
7775 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
7776 }
4a501606
PM
7777 if (arm_feature(env, ARM_FEATURE_VAPA)) {
7778 define_arm_cp_regs(cpu, vapa_cp_reginfo);
7779 }
c4804214
PM
7780 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
7781 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
7782 }
7783 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
7784 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
7785 }
7786 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
7787 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
7788 }
18032bec
PM
7789 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
7790 define_arm_cp_regs(cpu, omap_cp_reginfo);
7791 }
34f90529
PM
7792 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
7793 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
7794 }
1047b9d7
PM
7795 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7796 define_arm_cp_regs(cpu, xscale_cp_reginfo);
7797 }
7798 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
7799 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
7800 }
7ac681cf
PM
7801 if (arm_feature(env, ARM_FEATURE_LPAE)) {
7802 define_arm_cp_regs(cpu, lpae_cp_reginfo);
7803 }
873b73c0 7804 if (cpu_isar_feature(aa32_jazelle, cpu)) {
f96f3d5f
MZ
7805 define_arm_cp_regs(cpu, jazelle_regs);
7806 }
7884849c
PM
7807 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
7808 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
7809 * be read-only (i.e. a write causes an UNDEF exception).
7810 */
7811 {
00a29f3d
PM
7812 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
7813 /* Pre-v8 MIDR space.
7814 * Note that the MIDR isn't a simple constant register because
7884849c
PM
7815 * of the TI925 behaviour where writes to another register can
7816 * cause the MIDR value to change.
97ce8d61
PC
7817 *
7818 * Unimplemented registers in the c15 0 0 0 space default to
7819 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
7820 * and friends override accordingly.
7884849c
PM
7821 */
7822 { .name = "MIDR",
97ce8d61 7823 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7884849c 7824 .access = PL1_R, .resetvalue = cpu->midr,
d4e6df63 7825 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
731de9e6 7826 .readfn = midr_read,
97ce8d61
PC
7827 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7828 .type = ARM_CP_OVERRIDE },
7884849c
PM
7829 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
7830 { .name = "DUMMY",
7831 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
7832 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7833 { .name = "DUMMY",
7834 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
7835 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7836 { .name = "DUMMY",
7837 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
7838 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7839 { .name = "DUMMY",
7840 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
7841 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7842 { .name = "DUMMY",
7843 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
7844 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7884849c 7845 };
00a29f3d 7846 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
00a29f3d
PM
7847 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
7848 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
731de9e6
EI
7849 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
7850 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7851 .readfn = midr_read },
ac00c79f
SF
7852 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
7853 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7854 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7855 .access = PL1_R, .resetvalue = cpu->midr },
7856 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7857 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
7858 .access = PL1_R, .resetvalue = cpu->midr },
00a29f3d
PM
7859 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
7860 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
93fbc983
MZ
7861 .access = PL1_R,
7862 .accessfn = access_aa64_tid1,
7863 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
00a29f3d
PM
7864 };
7865 ARMCPRegInfo id_cp_reginfo[] = {
7866 /* These are common to v8 and pre-v8 */
7867 { .name = "CTR",
7868 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
630fcd4d
MZ
7869 .access = PL1_R, .accessfn = ctr_el0_access,
7870 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
00a29f3d
PM
7871 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
7872 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
7873 .access = PL0_R, .accessfn = ctr_el0_access,
7874 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7875 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
7876 { .name = "TCMTR",
7877 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
93fbc983
MZ
7878 .access = PL1_R,
7879 .accessfn = access_aa32_tid1,
7880 .type = ARM_CP_CONST, .resetvalue = 0 },
00a29f3d 7881 };
8085ce63
PC
7882 /* TLBTR is specific to VMSA */
7883 ARMCPRegInfo id_tlbtr_reginfo = {
7884 .name = "TLBTR",
7885 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
93fbc983
MZ
7886 .access = PL1_R,
7887 .accessfn = access_aa32_tid1,
7888 .type = ARM_CP_CONST, .resetvalue = 0,
8085ce63 7889 };
3281af81
PC
7890 /* MPUIR is specific to PMSA V6+ */
7891 ARMCPRegInfo id_mpuir_reginfo = {
7892 .name = "MPUIR",
7893 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7894 .access = PL1_R, .type = ARM_CP_CONST,
7895 .resetvalue = cpu->pmsav7_dregion << 8
7896 };
10b0220e 7897 static const ARMCPRegInfo crn0_wi_reginfo = {
7884849c
PM
7898 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
7899 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
7900 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
7901 };
6c5c0fec 7902#ifdef CONFIG_USER_ONLY
10b0220e 7903 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
6c5c0fec
AB
7904 { .name = "MIDR_EL1",
7905 .exported_bits = 0x00000000ffffffff },
7906 { .name = "REVIDR_EL1" },
6c5c0fec
AB
7907 };
7908 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
7909#endif
7884849c
PM
7910 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
7911 arm_feature(env, ARM_FEATURE_STRONGARM)) {
5809ac57 7912 size_t i;
7884849c 7913 /* Register the blanket "writes ignored" value first to cover the
a703eda1
PC
7914 * whole space. Then update the specific ID registers to allow write
7915 * access, so that they ignore writes rather than causing them to
7916 * UNDEF.
7884849c
PM
7917 */
7918 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5809ac57
RH
7919 for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
7920 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
00a29f3d 7921 }
5809ac57
RH
7922 for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
7923 id_cp_reginfo[i].access = PL1_RW;
7884849c 7924 }
10006112 7925 id_mpuir_reginfo.access = PL1_RW;
3281af81 7926 id_tlbtr_reginfo.access = PL1_RW;
7884849c 7927 }
00a29f3d
PM
7928 if (arm_feature(env, ARM_FEATURE_V8)) {
7929 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
7930 } else {
7931 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
7932 }
a703eda1 7933 define_arm_cp_regs(cpu, id_cp_reginfo);
452a0955 7934 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
8085ce63 7935 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
3281af81
PC
7936 } else if (arm_feature(env, ARM_FEATURE_V7)) {
7937 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
8085ce63 7938 }
7884849c
PM
7939 }
7940
97ce8d61 7941 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
52264166
AB
7942 ARMCPRegInfo mpidr_cp_reginfo[] = {
7943 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
7944 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
7945 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
52264166
AB
7946 };
7947#ifdef CONFIG_USER_ONLY
10b0220e 7948 static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
52264166
AB
7949 { .name = "MPIDR_EL1",
7950 .fixed_bits = 0x0000000080000000 },
52264166
AB
7951 };
7952 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
7953#endif
97ce8d61
PC
7954 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
7955 }
7956
2771db27 7957 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
834a6c69
PM
7958 ARMCPRegInfo auxcr_reginfo[] = {
7959 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
7960 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
99602377
RH
7961 .access = PL1_RW, .accessfn = access_tacr,
7962 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
834a6c69
PM
7963 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
7964 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
7965 .access = PL2_RW, .type = ARM_CP_CONST,
7966 .resetvalue = 0 },
7967 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
7968 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
7969 .access = PL3_RW, .type = ARM_CP_CONST,
7970 .resetvalue = 0 },
2771db27 7971 };
834a6c69 7972 define_arm_cp_regs(cpu, auxcr_reginfo);
f6287c24
PM
7973 if (cpu_isar_feature(aa32_ac2, cpu)) {
7974 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
0e0456ab 7975 }
2771db27
PM
7976 }
7977
d8ba780b 7978 if (arm_feature(env, ARM_FEATURE_CBAR)) {
d56974af
LM
7979 /*
7980 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
7981 * There are two flavours:
7982 * (1) older 32-bit only cores have a simple 32-bit CBAR
7983 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
7984 * 32-bit register visible to AArch32 at a different encoding
7985 * to the "flavour 1" register and with the bits rearranged to
7986 * be able to squash a 64-bit address into the 32-bit view.
7987 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
7988 * in future if we support AArch32-only configs of some of the
7989 * AArch64 cores we might need to add a specific feature flag
7990 * to indicate cores with "flavour 2" CBAR.
7991 */
f318cec6
PM
7992 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7993 /* 32 bit view is [31:18] 0...0 [43:32]. */
7994 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
7995 | extract64(cpu->reset_cbar, 32, 12);
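            /*
             * Worked example (hypothetical value): with reset_cbar =
             * 0x0000004030000000, extract64(.., 18, 14) << 18 keeps the
             * low address bits [31:18] (0x30000000) and
             * extract64(.., 32, 12) packs bits [43:32] into [11:0]
             * (0x040), giving cbar32 = 0x30000040.
             */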
7996 ARMCPRegInfo cbar_reginfo[] = {
7997 { .name = "CBAR",
7998 .type = ARM_CP_CONST,
d56974af
LM
7999 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
8000 .access = PL1_R, .resetvalue = cbar32 },
f318cec6
PM
8001 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
8002 .type = ARM_CP_CONST,
8003 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
d56974af 8004 .access = PL1_R, .resetvalue = cpu->reset_cbar },
f318cec6
PM
8005 };
8006 /* We don't implement a r/w 64 bit CBAR currently */
8007 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
8008 define_arm_cp_regs(cpu, cbar_reginfo);
8009 } else {
8010 ARMCPRegInfo cbar = {
8011 .name = "CBAR",
8012 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
8013 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
8014 .fieldoffset = offsetof(CPUARMState,
8015 cp15.c15_config_base_address)
8016 };
8017 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
8018 cbar.access = PL1_R;
8019 cbar.fieldoffset = 0;
8020 cbar.type = ARM_CP_CONST;
8021 }
8022 define_one_arm_cp_reg(cpu, &cbar);
8023 }
d8ba780b
PC
8024 }
8025
91db4642 8026 if (arm_feature(env, ARM_FEATURE_VBAR)) {
10b0220e 8027 static const ARMCPRegInfo vbar_cp_reginfo[] = {
91db4642
CLG
8028 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
8029 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8030 .access = PL1_RW, .writefn = vbar_write,
8031 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
8032 offsetof(CPUARMState, cp15.vbar_ns) },
8033 .resetvalue = 0 },
91db4642
CLG
8034 };
8035 define_arm_cp_regs(cpu, vbar_cp_reginfo);
8036 }
8037
2771db27
PM
8038 /* Generic registers whose values depend on the implementation */
8039 {
8040 ARMCPRegInfo sctlr = {
5ebafdf3 8041 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
137feaa9 8042 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
84929218 8043 .access = PL1_RW, .accessfn = access_tvm_trvm,
137feaa9
FA
8044 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
8045 offsetof(CPUARMState, cp15.sctlr_ns) },
d4e6df63
PM
8046 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
8047 .raw_writefn = raw_write,
2771db27
PM
8048 };
8049 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8050 /* Normally we would always end the TB on an SCTLR write, but Linux
8051 * arch/arm/mach-pxa/sleep.S expects two instructions following
8052 * an MMU enable to execute from cache. Imitate this behaviour.
8053 */
8054 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
8055 }
8056 define_one_arm_cp_reg(cpu, &sctlr);
8057 }
5be5e8ed 8058
2d7137c1 8059 if (cpu_isar_feature(aa64_lor, cpu)) {
2d7137c1
RH
8060 define_arm_cp_regs(cpu, lor_reginfo);
8061 }
220f508f
RH
8062 if (cpu_isar_feature(aa64_pan, cpu)) {
8063 define_one_arm_cp_reg(cpu, &pan_reginfo);
8064 }
04b07d29
RH
8065#ifndef CONFIG_USER_ONLY
8066 if (cpu_isar_feature(aa64_ats1e1, cpu)) {
8067 define_arm_cp_regs(cpu, ats1e1_reginfo);
8068 }
8069 if (cpu_isar_feature(aa32_ats1e1, cpu)) {
8070 define_arm_cp_regs(cpu, ats1cp_reginfo);
8071 }
8072#endif
9eeb7a1c
RH
8073 if (cpu_isar_feature(aa64_uao, cpu)) {
8074 define_one_arm_cp_reg(cpu, &uao_reginfo);
8075 }
2d7137c1 8076
dc8b1853
RC
8077 if (cpu_isar_feature(aa64_dit, cpu)) {
8078 define_one_arm_cp_reg(cpu, &dit_reginfo);
8079 }
f2f68a78
RC
8080 if (cpu_isar_feature(aa64_ssbs, cpu)) {
8081 define_one_arm_cp_reg(cpu, &ssbs_reginfo);
8082 }
58e93b48
RH
8083 if (cpu_isar_feature(any_ras, cpu)) {
8084 define_arm_cp_regs(cpu, minimal_ras_reginfo);
8085 }
dc8b1853 8086
52d18727
RH
8087 if (cpu_isar_feature(aa64_vh, cpu) ||
8088 cpu_isar_feature(aa64_debugv8p2, cpu)) {
8089 define_one_arm_cp_reg(cpu, &contextidr_el2);
8090 }
e2a1a461
RH
8091 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8092 define_arm_cp_regs(cpu, vhe_reginfo);
8093 }
8094
cd208a1c 8095 if (cpu_isar_feature(aa64_sve, cpu)) {
60360d82 8096 define_arm_cp_regs(cpu, zcr_reginfo);
5be5e8ed 8097 }
967aa94f 8098
5814d587
RH
8099 if (cpu_isar_feature(aa64_hcx, cpu)) {
8100 define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
8101 }
8102
967aa94f 8103#ifdef TARGET_AARCH64
9e5ec745
RH
8104 if (cpu_isar_feature(aa64_sme, cpu)) {
8105 define_arm_cp_regs(cpu, sme_reginfo);
8106 }
967aa94f
RH
8107 if (cpu_isar_feature(aa64_pauth, cpu)) {
8108 define_arm_cp_regs(cpu, pauth_reginfo);
8109 }
de390645
RH
8110 if (cpu_isar_feature(aa64_rndr, cpu)) {
8111 define_arm_cp_regs(cpu, rndr_reginfo);
8112 }
84940ed8
RC
8113 if (cpu_isar_feature(aa64_tlbirange, cpu)) {
8114 define_arm_cp_regs(cpu, tlbirange_reginfo);
8115 }
7113d618
RC
8116 if (cpu_isar_feature(aa64_tlbios, cpu)) {
8117 define_arm_cp_regs(cpu, tlbios_reginfo);
8118 }
0d57b499
BM
8119#ifndef CONFIG_USER_ONLY
8120 /* Data Cache clean instructions up to PoP */
8121 if (cpu_isar_feature(aa64_dcpop, cpu)) {
8122 define_one_arm_cp_reg(cpu, dcpop_reg);
8123
8124 if (cpu_isar_feature(aa64_dcpodp, cpu)) {
8125 define_one_arm_cp_reg(cpu, dcpodp_reg);
8126 }
8127 }
8128#endif /*CONFIG_USER_ONLY*/
4b779ceb
RH
8129
8130 /*
8131 * If full MTE is enabled, add all of the system registers.
8132 * If only "instructions available at EL0" are enabled,
8133 * then define only a RAZ/WI version of PSTATE.TCO.
8134 */
8135 if (cpu_isar_feature(aa64_mte, cpu)) {
8136 define_arm_cp_regs(cpu, mte_reginfo);
5463df16 8137 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
4b779ceb
RH
8138 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
8139 define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
5463df16 8140 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
4b779ceb 8141 }
7cb1e618
RH
8142
8143 if (cpu_isar_feature(aa64_scxtnum, cpu)) {
8144 define_arm_cp_regs(cpu, scxtnum_reginfo);
8145 }
967aa94f 8146#endif
cb570bd3 8147
22e57073 8148 if (cpu_isar_feature(any_predinv, cpu)) {
cb570bd3
RH
8149 define_arm_cp_regs(cpu, predinv_reginfo);
8150 }
e2cce18f 8151
957e6155
PM
8152 if (cpu_isar_feature(any_ccidx, cpu)) {
8153 define_arm_cp_regs(cpu, ccsidr2_reginfo);
8154 }
8155
e2cce18f
RH
8156#ifndef CONFIG_USER_ONLY
8157 /*
8158 * Register redirections and aliases must be done last,
8159 * after the registers from the other extensions have been defined.
8160 */
8161 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8162 define_arm_vh_e2h_redirects_aliases(cpu);
8163 }
8164#endif
2ceb98c0
PM
8165}
8166
777dc784
PM
8167/* Sort alphabetically by type name, except for "any". */
8168static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5adb4839 8169{
777dc784
PM
8170 ObjectClass *class_a = (ObjectClass *)a;
8171 ObjectClass *class_b = (ObjectClass *)b;
8172 const char *name_a, *name_b;
5adb4839 8173
777dc784
PM
8174 name_a = object_class_get_name(class_a);
8175 name_b = object_class_get_name(class_b);
51492fd1 8176 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
777dc784 8177 return 1;
51492fd1 8178 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
777dc784
PM
8179 return -1;
8180 } else {
8181 return strcmp(name_a, name_b);
5adb4839
PB
8182 }
8183}
8184
777dc784 8185static void arm_cpu_list_entry(gpointer data, gpointer user_data)
40f137e1 8186{
777dc784 8187 ObjectClass *oc = data;
51492fd1
AF
8188 const char *typename;
8189 char *name;
3371d272 8190
51492fd1
AF
8191 typename = object_class_get_name(oc);
8192 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
0442428a 8193 qemu_printf(" %s\n", name);
51492fd1 8194 g_free(name);
777dc784
PM
8195}
8196
0442428a 8197void arm_cpu_list(void)
777dc784 8198{
777dc784
PM
8199 GSList *list;
8200
8201 list = object_class_get_list(TYPE_ARM_CPU, false);
8202 list = g_slist_sort(list, arm_cpu_list_compare);
0442428a
MA
8203 qemu_printf("Available CPUs:\n");
8204 g_slist_foreach(list, arm_cpu_list_entry, NULL);
777dc784 8205 g_slist_free(list);
40f137e1
PB
8206}
8207
78027bb6
CR
8208static void arm_cpu_add_definition(gpointer data, gpointer user_data)
8209{
8210 ObjectClass *oc = data;
8211 CpuDefinitionInfoList **cpu_list = user_data;
78027bb6
CR
8212 CpuDefinitionInfo *info;
8213 const char *typename;
8214
8215 typename = object_class_get_name(oc);
8216 info = g_malloc0(sizeof(*info));
8217 info->name = g_strndup(typename,
8218 strlen(typename) - strlen("-" TYPE_ARM_CPU));
8ed877b7 8219 info->q_typename = g_strdup(typename);
78027bb6 8220
54aa3de7 8221 QAPI_LIST_PREPEND(*cpu_list, info);
78027bb6
CR
8222}
8223
25a9d6ca 8224CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
78027bb6
CR
8225{
8226 CpuDefinitionInfoList *cpu_list = NULL;
8227 GSList *list;
8228
8229 list = object_class_get_list(TYPE_ARM_CPU, false);
8230 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
8231 g_slist_free(list);
8232
8233 return cpu_list;
8234}
8235
1859f8c3
RH
8236/*
8237 * Private utility function for define_one_arm_cp_reg_with_opaque():
8238 * add a single reginfo struct to the hash table.
8239 */
6e6efd61 8240static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
cbe64585
RH
8241 void *opaque, CPState state,
8242 CPSecureState secstate,
9c513e78
AB
8243 int crm, int opc1, int opc2,
8244 const char *name)
6e6efd61 8245{
696ba377 8246 CPUARMState *env = &cpu->env;
5860362d 8247 uint32_t key;
c27f5d3a 8248 ARMCPRegInfo *r2;
4c8c4541
RH
8249 bool is64 = r->type & ARM_CP_64BIT;
8250 bool ns = secstate & ARM_CP_SECSTATE_NS;
cac65299 8251 int cp = r->cp;
c27f5d3a 8252 size_t name_len;
696ba377 8253 bool make_const;
c27f5d3a 8254
cac65299
RH
8255 switch (state) {
8256 case ARM_CP_STATE_AA32:
8257 /* We assume it is a cp15 register if the .cp field is left unset. */
8258 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
8259 cp = 15;
8260 }
8261 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
8262 break;
8263 case ARM_CP_STATE_AA64:
8264 /*
8265 * To allow abbreviation of ARMCPRegInfo definitions, we treat
8266 * cp == 0 as equivalent to the value for "standard guest-visible
8267 * sysreg". STATE_BOTH definitions are also always "standard sysreg"
8268 * in their AArch64 view (the .cp value may be non-zero for the
8269 * benefit of the AArch32 view).
8270 */
8271 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
8272 cp = CP_REG_ARM64_SYSREG_CP;
8273 }
8274 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
8275 break;
8276 default:
8277 g_assert_not_reached();
8278 }
8279
dc44545b
RH
8280 /* Overriding of an existing definition must be explicitly requested. */
8281 if (!(r->type & ARM_CP_OVERRIDE)) {
8282 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
8283 if (oldreg) {
8284 assert(oldreg->type & ARM_CP_OVERRIDE);
8285 }
8286 }
8287
696ba377
RH
8288 /*
8289 * Eliminate registers that are not present because the EL is missing.
8290 * Doing this here makes it easier to put all registers for a given
8291 * feature into the same ARMCPRegInfo array and define them all at once.
8292 */
8293 make_const = false;
8294 if (arm_feature(env, ARM_FEATURE_EL3)) {
8295 /*
8296 * An EL2 register without EL2 but with EL3 is (usually) RES0.
8297 * See rule RJFFP in section D1.1.3 of DDI0487H.a.
8298 */
8299 int min_el = ctz32(r->access) / 2;
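        /*
         * Worked examples, assuming the PLx_R/W bit layout in cpregs.h
         * (two access bits per EL, EL3 in the top pair): PL3_RW is 0xc0,
         * so ctz32() gives 6 and min_el is 3; PL2_RW is 0xf0, giving
         * min_el 2; PL1_RW is 0xfc, giving min_el 1.
         */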
8300 if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
8301 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
8302 return;
8303 }
8304 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
8305 }
8306 } else {
8307 CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
8308 ? PL2_RW : PL1_RW);
8309 if ((r->access & max_el) == 0) {
8310 return;
8311 }
8312 }
8313
c27f5d3a
RH
8314 /* Combine cpreg and name into one allocation. */
8315 name_len = strlen(name) + 1;
8316 r2 = g_malloc(sizeof(*r2) + name_len);
8317 *r2 = *r;
8318 r2->name = memcpy(r2 + 1, name, name_len);
3f3c82a5 8319
cc946d96
RH
8320 /*
8321 * Update fields to match the instantiation, overwriting wildcards
8322 * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
3f3c82a5 8323 */
cc946d96
RH
8324 r2->cp = cp;
8325 r2->crm = crm;
8326 r2->opc1 = opc1;
8327 r2->opc2 = opc2;
8328 r2->state = state;
3f3c82a5 8329 r2->secure = secstate;
cc946d96
RH
8330 if (opaque) {
8331 r2->opaque = opaque;
8332 }
3f3c82a5 8333
696ba377
RH
8334 if (make_const) {
8335 /* This should not have been a very special register to begin with. */
8336 int old_special = r2->type & ARM_CP_SPECIAL_MASK;
8337 assert(old_special == 0 || old_special == ARM_CP_NOP);
1859f8c3 8338 /*
696ba377
RH
8339 * Set the special function to CONST, retaining the other flags.
8340 * This is important for e.g. ARM_CP_SVE so that we still
8341 * take the SVE trap if CPTR_EL3.EZ == 0.
f5a0a5a5 8342 */
696ba377
RH
8343 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
8344 /*
8345 * Usually, these registers become RES0, but there are a few
8346 * special cases like VPIDR_EL2 which have a constant non-zero
8347 * value with writes ignored.
8348 */
8349 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
8350 r2->resetvalue = 0;
8351 }
8352 /*
8353 * ARM_CP_CONST has precedence, so removing the callbacks and
8354 * offsets are not strictly necessary, but it is potentially
8355 * less confusing to debug later.
8356 */
8357 r2->readfn = NULL;
8358 r2->writefn = NULL;
8359 r2->raw_readfn = NULL;
8360 r2->raw_writefn = NULL;
8361 r2->resetfn = NULL;
8362 r2->fieldoffset = 0;
8363 r2->bank_fieldoffsets[0] = 0;
8364 r2->bank_fieldoffsets[1] = 0;
8365 } else {
8366 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
3f3c82a5 8367
10748a96 8368 if (isbanked) {
1859f8c3 8369 /*
696ba377
RH
8370 * Register is banked (using both entries in the array).
8371 * Overwrite fieldoffset: the array is only used to define
8372 * banked registers, and from here on only fieldoffset is used.
3f3c82a5 8373 */
696ba377
RH
8374 r2->fieldoffset = r->bank_fieldoffsets[ns];
8375 }
8376 if (state == ARM_CP_STATE_AA32) {
8377 if (isbanked) {
8378 /*
8379 * If the register is banked then we don't need to migrate or
8380 * reset the 32-bit instance in certain cases:
8381 *
8382 * 1) If the register has both 32-bit and 64-bit instances
8383 * then we can count on the 64-bit instance taking care
8384 * of the non-secure bank.
8385 * 2) If ARMv8 is enabled then we can count on a 64-bit
8386 * version taking care of the secure bank. This requires
8387 * that separate 32 and 64-bit definitions are provided.
8388 */
8389 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
8390 (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
8391 r2->type |= ARM_CP_ALIAS;
8392 }
8393 } else if ((secstate != r->secure) && !ns) {
8394 /*
8395 * The register is not banked so we only want to allow
8396 * migration of the non-secure instance.
8397 */
7a0e58fa 8398 r2->type |= ARM_CP_ALIAS;
3f3c82a5 8399 }
3f3c82a5 8400
696ba377
RH
8401 if (HOST_BIG_ENDIAN &&
8402 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
8403 r2->fieldoffset += sizeof(uint32_t);
8404 }
3f3c82a5 8405 }
f5a0a5a5 8406 }
cc946d96 8407
1859f8c3
RH
8408 /*
8409 * By convention, for wildcarded registers only the first
6e6efd61 8410 * entry is used for migration; the others are marked as
7a0e58fa 8411 * ALIAS so we don't try to transfer the register
6e6efd61 8412 * multiple times. Special registers (ie NOP/WFI) are
7a0e58fa 8413 * never migratable and not even raw-accessible.
6e6efd61 8414 */
696ba377 8415 if (r2->type & ARM_CP_SPECIAL_MASK) {
7a0e58fa
PM
8416 r2->type |= ARM_CP_NO_RAW;
8417 }
8418 if (((r->crm == CP_ANY) && crm != 0) ||
6e6efd61
PM
8419 ((r->opc1 == CP_ANY) && opc1 != 0) ||
8420 ((r->opc2 == CP_ANY) && opc2 != 0)) {
1f163787 8421 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
6e6efd61
PM
8422 }
8423
1859f8c3
RH
8424 /*
8425 * Check that raw accesses are either forbidden or handled. Note that
375421cc
PM
8426 * we can't assert this earlier because the setup of fieldoffset for
8427 * banked registers has to be done first.
8428 */
8429 if (!(r2->type & ARM_CP_NO_RAW)) {
8430 assert(!raw_accessors_invalid(r2));
8431 }
8432
5860362d 8433 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
6e6efd61
PM
8434}
8435
8436
4b6a83fb
PM
8437void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
8438 const ARMCPRegInfo *r, void *opaque)
8439{
8440 /* Define implementations of coprocessor registers.
8441 * We store these in a hashtable because typically
8442 * there are fewer than 150 registers in a space which
8443 * is 16*16*16*8*8 = 262144 in size.
8444 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8445 * If a register is defined twice then the second definition is
8446 * used; this allows defining some generic registers first and
8447 * then overriding them with implementation-specific variations.
8448 * At least one of the original and the second definition should
8449 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8450 * against accidental use.
f5a0a5a5
PM
8451 *
8452 * The state field defines whether the register is to be
8453 * visible in the AArch32 or AArch64 execution state. If the
8454 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8455 * reginfo structure for the AArch32 view, which sees the lower
8456 * 32 bits of the 64 bit register.
8457 *
8458 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8459 * be wildcarded. AArch64 registers are always considered to be 64
8460 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8461 * the register, if any.
4b6a83fb 8462 */
d95101d6 8463 int crm, opc1, opc2;
4b6a83fb
PM
8464 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8465 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8466 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8467 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8468 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8469 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
d95101d6
RH
8470 CPState state;
8471
4b6a83fb
PM
8472 /* 64 bit registers have only CRm and Opc1 fields */
8473 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
f5a0a5a5
PM
8474 /* op0 only exists in the AArch64 encodings */
8475 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8476 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8477 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
cd8be50e
PM
8478 /*
8479 * This API is only for Arm's system coprocessors (14 and 15) or
8480 * (M-profile or v7A-and-earlier only) for implementation-defined
8481 * coprocessors in the range 0..7. Our decode assumes this, since
8482 * 8..13 can be used for other insns including VFP and Neon. See
8483 * valid_cp() in translate.c. Assert here that we haven't tried
8484 * to use an invalid coprocessor number.
8485 */
8486 switch (r->state) {
8487 case ARM_CP_STATE_BOTH:
8488 /* 0 has a special meaning, but otherwise the same rules as AA32. */
8489 if (r->cp == 0) {
8490 break;
8491 }
8492 /* fall through */
8493 case ARM_CP_STATE_AA32:
8494 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
8495 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
8496 assert(r->cp >= 14 && r->cp <= 15);
8497 } else {
8498 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
8499 }
8500 break;
8501 case ARM_CP_STATE_AA64:
8502 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
8503 break;
8504 default:
8505 g_assert_not_reached();
8506 }
f5a0a5a5
PM
8507 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8508 * encodes a minimum access level for the register. We roll this
8509 * runtime check into our general permission check code, so check
8510 * here that the reginfo's specified permissions are strict enough
8511 * to encompass the generic architectural permission check.
8512 */
8513 if (r->state != ARM_CP_STATE_AA32) {
39107337 8514 CPAccessRights mask;
f5a0a5a5 8515 switch (r->opc1) {
b5bd7440
AB
8516 case 0:
8517 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8518 mask = PL0U_R | PL1_RW;
8519 break;
8520 case 1: case 2:
f5a0a5a5
PM
8521 /* min_EL EL1 */
8522 mask = PL1_RW;
8523 break;
8524 case 3:
8525 /* min_EL EL0 */
8526 mask = PL0_RW;
8527 break;
8528 case 4:
b4ecf60f 8529 case 5:
f5a0a5a5
PM
8530 /* min_EL EL2 */
8531 mask = PL2_RW;
8532 break;
f5a0a5a5
PM
8533 case 6:
8534 /* min_EL EL3 */
8535 mask = PL3_RW;
8536 break;
8537 case 7:
8538 /* min_EL EL1, secure mode only (we don't check the latter) */
8539 mask = PL1_RW;
8540 break;
8541 default:
8542 /* broken reginfo with out-of-range opc1 */
d385a605 8543 g_assert_not_reached();
f5a0a5a5
PM
8544 }
8545 /* assert our permissions are not too lax (stricter is fine) */
8546 assert((r->access & ~mask) == 0);
8547 }
8548
4b6a83fb
PM
8549 /* Check that the register definition has enough info to handle
8550 * reads and writes if they are permitted.
8551 */
87c3f0f2 8552 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
4b6a83fb 8553 if (r->access & PL3_R) {
3f3c82a5
FA
8554 assert((r->fieldoffset ||
8555 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8556 r->readfn);
4b6a83fb
PM
8557 }
8558 if (r->access & PL3_W) {
3f3c82a5
FA
8559 assert((r->fieldoffset ||
8560 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8561 r->writefn);
4b6a83fb
PM
8562 }
8563 }
5809ac57 8564
4b6a83fb
PM
8565 for (crm = crmmin; crm <= crmmax; crm++) {
8566 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8567 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
f5a0a5a5
PM
8568 for (state = ARM_CP_STATE_AA32;
8569 state <= ARM_CP_STATE_AA64; state++) {
8570 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
8571 continue;
8572 }
3f3c82a5
FA
8573 if (state == ARM_CP_STATE_AA32) {
8574 /* Under AArch32 CP registers can be common
8575 * (same for secure and non-secure world) or banked.
8576 */
9c513e78
AB
8577 char *name;
8578
3f3c82a5
FA
8579 switch (r->secure) {
8580 case ARM_CP_SECSTATE_S:
8581 case ARM_CP_SECSTATE_NS:
8582 add_cpreg_to_hashtable(cpu, r, opaque, state,
9c513e78
AB
8583 r->secure, crm, opc1, opc2,
8584 r->name);
3f3c82a5 8585 break;
cbe64585 8586 case ARM_CP_SECSTATE_BOTH:
9c513e78 8587 name = g_strdup_printf("%s_S", r->name);
3f3c82a5
FA
8588 add_cpreg_to_hashtable(cpu, r, opaque, state,
8589 ARM_CP_SECSTATE_S,
9c513e78
AB
8590 crm, opc1, opc2, name);
8591 g_free(name);
3f3c82a5
FA
8592 add_cpreg_to_hashtable(cpu, r, opaque, state,
8593 ARM_CP_SECSTATE_NS,
9c513e78 8594 crm, opc1, opc2, r->name);
3f3c82a5 8595 break;
cbe64585
RH
8596 default:
8597 g_assert_not_reached();
3f3c82a5
FA
8598 }
8599 } else {
8600 /* AArch64 registers get mapped to non-secure instance
8601 * of AArch32 */
8602 add_cpreg_to_hashtable(cpu, r, opaque, state,
8603 ARM_CP_SECSTATE_NS,
9c513e78 8604 crm, opc1, opc2, r->name);
3f3c82a5 8605 }
f5a0a5a5 8606 }
4b6a83fb
PM
8607 }
8608 }
8609 }
8610}
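
/*
 * Illustrative sketch (added comment, not part of the original source;
 * the register name and encoding here are hypothetical): a reginfo
 * using the wildcarding described above might look like
 *
 *     static const ARMCPRegInfo demo_reginfo = {
 *         .name = "DEMO_REG", .cp = 15, .crn = 9,
 *         .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
 *         .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
 *     };
 *
 * Because crm/opc1/opc2 are CP_ANY, this single reginfo is registered
 * at every matching encoding (16 * 8 * 8 slots in the hashtable).
 */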

/* Define a whole list of registers */
void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
                                        void *opaque, size_t len)
{
    size_t i;
    for (i = 0; i < len; ++i) {
        define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
    }
}

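/*
 * Illustrative usage (added comment, hypothetical register list):
 * callers normally go through the define_arm_cp_regs() wrapper, which
 * supplies ARRAY_SIZE(regs) as the length and NULL as the opaque:
 *
 *     static const ARMCPRegInfo my_regs[] = { ... };
 *     define_arm_cp_regs(cpu, my_regs);
 */
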
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
                                 const ARMCPRegUserSpaceInfo *mods,
                                 size_t mods_len)
{
    for (size_t mi = 0; mi < mods_len; ++mi) {
        const ARMCPRegUserSpaceInfo *m = mods + mi;
        GPatternSpec *pat = NULL;

        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (size_t ri = 0; ri < regs_len; ++ri) {
            ARMCPRegInfo *r = regs + ri;

            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}

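/*
 * Sketch only (added comment; the names below are hypothetical): a
 * glob entry blanks every matching register, while an exact-name entry
 * can keep selected reset-value bits visible to user space:
 *
 *     static const ARMCPRegUserSpaceInfo demo_mods[] = {
 *         { .name = "ID_AA64PFR*_EL1", .is_glob = true },
 *         { .name = "ID_AA64PFR0_EL1",
 *           .exported_bits = 0x000f000f00ff0000ull },
 *     };
 *     modify_arm_cp_regs(my_reginfo_array, demo_mods);
 */
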
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;
    bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
        (mask & (CPSR_M | CPSR_E | CPSR_IL));

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
    if (rebuild_hflags) {
        arm_rebuild_hflags(env);
    }
}

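/*
 * Worked example (added comment, not part of the original source): a
 * caller that only wants to update the condition flags passes
 * CPSR_NZCV as the mask, leaving every other field untouched:
 *
 *     cpsr_write(env, 0x40000000, CPSR_NZCV, CPSRWriteRaw);  // Z := 1
 *
 * CPSRWriteRaw bypasses the mode-switch checks and hflags rebuild
 * above, which is what state-load/migration style callers want;
 * guest-visible writes use CPSRWriteByInstr or CPSRWriteExceptionReturn.
 */
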
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

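/*
 * Illustration (added comment; values checked against the code above):
 * sxtb16 sign extends bytes 0 and 2 into the two result halfwords, so
 * sxtb16(0x00800001) == 0xff800001 and sxtb16(0x007f0080) == 0x007fff80.
 */
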
static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
{
    /*
     * Take a division-by-zero exception if necessary; otherwise return
     * to get the usual non-trapping division behaviour (result of 0)
     */
    if (arm_feature(env, ARM_FEATURE_M)
        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
    }
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

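/*
 * Note (added for clarity): INT_MIN / -1 is the one signed division
 * whose mathematically correct result (2^31) is unrepresentable; the
 * Arm ARM defines SDIV to return INT_MIN here, and checking it
 * explicitly also avoids the undefined behaviour the C '/' operator
 * would otherwise trigger.
 */
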
uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}

#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 * Dimensions:
 *  target_el_table[2][2][2][2][2][4]
 *                  |  |  |  |  |  +--- Current EL
 *                  |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                  |  |  |  +--------- HCR mask override
 *                  |  |  +------------ SCR exec state control
 *                  |  +--------------- SCR mask override
 *                  +------------------ 32-bit(0)/64-bit(1) EL3
 *
 * The table values are as such:
 *  0-3 = EL0-EL3
 *   -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *  1) An exception is taken from EL3 but the SCR does not have the exception
 *     routed to EL3.
 *  2) An exception is taken from EL2 but the HCR does not have the exception
 *     routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

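/*
 * Worked lookup (added illustration, checked against the table above):
 * a physical IRQ taken at EL0, non-secure, with a 64-bit EL3,
 * SCR_EL3.{IRQ,RW} = {0,1} and HCR_EL2.{IMO,TGE} = {0,0} indexes
 * target_el_table[1][0][1][0][0][0] == 1, i.e. the IRQ is taken to EL1.
 */
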
void arm_log_exception(CPUState *cs)
{
    int idx = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
            [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
            [EXCP_VSERR] = "Virtual SERR",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
                      idx, exc, cs->cpu_index);
    }
}

/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}

/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

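/*
 * Summary table (added for reference; derived from the two functions
 * above, which implement the architectural mapping of the banked
 * AArch32 registers onto the AArch64 X registers):
 *
 *   x0-x7   <-> r0-r7           x16/x17 <-> lr_irq/sp_irq
 *   x8-x12  <-> r8-r12 (usr)    x18/x19 <-> lr_svc/sp_svc
 *   x13/x14 <-> sp_usr/lr_usr   x20/x21 <-> lr_abt/sp_abt
 *   x15     <-> sp_hyp          x22/x23 <-> lr_und/sp_und
 *                               x24-x30 <-> r8_fiq-r14_fiq
 */
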
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless... */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state. */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0. */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * This is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x08;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_VSERR:
        {
            /*
             * Note that this is reported as a data abort, but the DFAR
             * has an UNKNOWN value. Construct the SError syndrome from
             * AET and ExT fields.
             */
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = arm_fi_to_lfsc(&fi);
            } else {
                env->exception.fsr = arm_fi_to_sfsc(&fi);
            }
            env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
            A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
            qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
                          env->exception.fsr);

            new_mode = ARM_CPU_MODE_ABT;
            addr = 0x10;
            mask = CPSR_A | CPSR_I;
            offset = 8;
        }
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}

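/*
 * Added reference (standard architectural layout, matching the addr
 * values chosen above): the AArch32 vector table offsets are
 *   0x00 Reset       0x0c Prefetch abort   0x18 IRQ
 *   0x04 Undefined   0x10 Data abort       0x1c FIQ
 *   0x08 SVC/SMC     0x14 reserved (Hyp Trap entry in the Hyp table)
 */
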
static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}

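/*
 * Example (added illustration, consistent with the mapping above): a
 * trapped MCR executed in SVC mode with Rt = 13 reports x19 in the
 * AArch64 syndrome, because SVC-mode r13 is the register that
 * aarch64_sync_32_to_64() stores in xregs[19].
 */
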
static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }
    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}

static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
    /* Return true if this syndrome value is a synchronous external abort */
    switch (syn_get_ec(syndrome)) {
    case EC_INSNABORT:
    case EC_INSNABORT_SAME_EL:
    case EC_DATAABORT:
    case EC_DATAABORT_SAME_EL:
        /* Look at fault status code for all the synchronous ext abort cases */
        switch (syndrome & 0x3f) {
        case 0x10:
        case 0x13:
        case 0x14:
        case 0x15:
        case 0x16:
        case 0x17:
            return true;
        default:
            return false;
        }
    default:
        return false;
    }
}

/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

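    /*
     * Added reference (architectural AArch64 vector layout, which the
     * offsets above and below implement): each VBAR_ELx table has four
     * groups of entries, 0x000 current EL with SP_EL0, 0x200 current EL
     * with SP_ELx, 0x400 lower EL using AArch64, 0x600 lower EL using
     * AArch32; within a group, +0x00 synchronous, +0x80 IRQ, +0x100 FIQ,
     * +0x180 SError.
     */
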
    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * FEAT_DoubleFault allows synchronous external aborts taken to EL3
         * to be taken to the SError vector entrypoint.
         */
        if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
            syndrome_is_sync_extabt(env->exception.syndrome)) {
            addr += 0x180;
        }
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_VSERR:
        addr += 0x180;
        /* Construct the SError syndrome from IDS and ISS fields. */
        env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = true;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}

/*
 * Do semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif

/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state, so the BQL must be held; it must
     * also be held for any modification of cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */

uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
             ? 2 : 1;
    }
    return env->cp15.sctlr_el[el];
}

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits. */
        return extract32(tcr, 20, 1) * 3;
    }
}

int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits. */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits. */
        return extract32(tcr, 30, 1) * 3;
    }
}

10120
b830a5ee
RH
10121ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
10122 ARMMMUIdx mmu_idx, bool data)
ba97be9f 10123{
c1547bba 10124 uint64_t tcr = regime_tcr(env, mmu_idx);
ef56c242
RH
10125 bool epd, hpd, using16k, using64k, tsz_oob, ds;
10126 int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
10127 ARMCPU *cpu = env_archcpu(env);
ba97be9f 10128
339370b9 10129 if (!regime_has_2_ranges(mmu_idx)) {
71d18164 10130 select = 0;
ba97be9f
RH
10131 tsz = extract32(tcr, 0, 6);
10132 using64k = extract32(tcr, 14, 1);
10133 using16k = extract32(tcr, 15, 1);
b1a10c86 10134 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
ba97be9f 10135 /* VTCR_EL2 */
b830a5ee 10136 hpd = false;
ba97be9f 10137 } else {
ba97be9f
RH
10138 hpd = extract32(tcr, 24, 1);
10139 }
10140 epd = false;
ef56c242 10141 sh = extract32(tcr, 12, 2);
f4ecc015 10142 ps = extract32(tcr, 16, 3);
ef56c242 10143 ds = extract64(tcr, 32, 1);
ba97be9f 10144 } else {
71d18164
RH
10145 /*
10146 * Bit 55 is always between the two regions, and is canonical for
10147 * determining if address tagging is enabled.
10148 */
10149 select = extract64(va, 55, 1);
10150 if (!select) {
10151 tsz = extract32(tcr, 0, 6);
10152 epd = extract32(tcr, 7, 1);
ef56c242 10153 sh = extract32(tcr, 12, 2);
71d18164
RH
10154 using64k = extract32(tcr, 14, 1);
10155 using16k = extract32(tcr, 15, 1);
71d18164 10156 hpd = extract64(tcr, 41, 1);
71d18164
RH
10157 } else {
10158 int tg = extract32(tcr, 30, 2);
10159 using16k = tg == 1;
10160 using64k = tg == 3;
10161 tsz = extract32(tcr, 16, 6);
10162 epd = extract32(tcr, 23, 1);
ef56c242 10163 sh = extract32(tcr, 28, 2);
71d18164 10164 hpd = extract64(tcr, 42, 1);
71d18164 10165 }
f4ecc015 10166 ps = extract64(tcr, 32, 3);
ef56c242 10167 ds = extract64(tcr, 59, 1);
ba97be9f 10168 }
c36c65ea 10169
ef56c242 10170 if (cpu_isar_feature(aa64_st, cpu)) {
c36c65ea
RDC
10171 max_tsz = 48 - using64k;
10172 } else {
10173 max_tsz = 39;
10174 }
0af312b6 10175
ef56c242
RH
10176 /*
10177 * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
10178 * adjust the effective value of DS, as documented.
10179 */
0af312b6
RH
10180 min_tsz = 16;
10181 if (using64k) {
ef56c242
RH
10182 if (cpu_isar_feature(aa64_lva, cpu)) {
10183 min_tsz = 12;
10184 }
10185 ds = false;
10186 } else if (ds) {
10187 switch (mmu_idx) {
10188 case ARMMMUIdx_Stage2:
10189 case ARMMMUIdx_Stage2_S:
10190 if (using16k) {
10191 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
10192 } else {
10193 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
10194 }
10195 break;
10196 default:
10197 if (using16k) {
10198 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
10199 } else {
10200 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
10201 }
10202 break;
10203 }
10204 if (ds) {
0af312b6
RH
10205 min_tsz = 12;
10206 }
10207 }
c36c65ea 10208
ebf93ce7
RH
10209 if (tsz > max_tsz) {
10210 tsz = max_tsz;
10211 tsz_oob = true;
10212 } else if (tsz < min_tsz) {
10213 tsz = min_tsz;
10214 tsz_oob = true;
10215 } else {
10216 tsz_oob = false;
10217 }
ba97be9f 10218
b830a5ee
RH
10219 /* Present TBI as a composite with TBID. */
10220 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
10221 if (!data) {
10222 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
10223 }
10224 tbi = (tbi >> select) & 1;
10225
ba97be9f
RH
10226 return (ARMVAParameters) {
10227 .tsz = tsz,
f4ecc015 10228 .ps = ps,
ef56c242 10229 .sh = sh,
ba97be9f
RH
10230 .select = select,
10231 .tbi = tbi,
10232 .epd = epd,
10233 .hpd = hpd,
10234 .using16k = using16k,
10235 .using64k = using64k,
ebf93ce7 10236 .tsz_oob = tsz_oob,
ef56c242 10237 .ds = ds,
ba97be9f
RH
10238 };
10239}
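
As a usage sketch, the tsz field returned here encodes the regime's input address size as 64 - tsz bits; a hypothetical caller (the name is illustrative, real callers live in the translation-table walk code) might derive it like this:

static int example_va_size_bits(CPUARMState *env, uint64_t va, ARMMMUIdx idx)
{
    /* Hypothetical: data=true selects the TBI treatment for data accesses. */
    ARMVAParameters p = aa64_va_parameters(env, va, idx, true);
    return 64 - p.tsz;   /* e.g. tsz == 16 gives a 48-bit VA space */
}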
10240
6ddbc6e4
PB
10241/* Note that signed overflow is undefined in C. The following routines
10242 * are careful to use unsigned types where modulo arithmetic is required;
10243 * failure to do so _will_ break on newer gcc. */
10244
10245/* Signed saturating arithmetic. */
10246
1654b2d6 10247/* Perform 16-bit signed saturating addition. */
6ddbc6e4
PB
10248static inline uint16_t add16_sat(uint16_t a, uint16_t b)
10249{
10250 uint16_t res;
10251
10252 res = a + b;
10253 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
10254 if (a & 0x8000)
10255 res = 0x8000;
10256 else
10257 res = 0x7fff;
10258 }
10259 return res;
10260}
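
The guard in add16_sat() detects signed overflow using only unsigned arithmetic: overflow can only occur when the operands share a sign bit (!((a ^ b) & 0x8000)) and the result's sign differs from theirs ((res ^ a) & 0x8000). A hypothetical self-check, assuming assert() is available via the usual includes:

static void example_check_add16_sat(void)
{
    assert(add16_sat(0x7000, 0x2000) == 0x7fff); /* +ve overflow saturates */
    assert(add16_sat(0x8000, 0xffff) == 0x8000); /* -ve overflow saturates */
    assert(add16_sat(0x7000, 0x8000) == 0xf000); /* mixed signs: exact */
}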
10261
1654b2d6 10262/* Perform 8-bit signed saturating addition. */
6ddbc6e4
PB
10263static inline uint8_t add8_sat(uint8_t a, uint8_t b)
10264{
10265 uint8_t res;
10266
10267 res = a + b;
10268 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
10269 if (a & 0x80)
10270 res = 0x80;
10271 else
10272 res = 0x7f;
10273 }
10274 return res;
10275}
10276
1654b2d6 10277/* Perform 16-bit signed saturating subtraction. */
6ddbc6e4
PB
10278static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
10279{
10280 uint16_t res;
10281
10282 res = a - b;
10283 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
10284 if (a & 0x8000)
10285 res = 0x8000;
10286 else
10287 res = 0x7fff;
10288 }
10289 return res;
10290}
10291
1654b2d6 10292/* Perform 8-bit signed saturating subtraction. */
6ddbc6e4
PB
10293static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
10294{
10295 uint8_t res;
10296
10297 res = a - b;
10298 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
10299 if (a & 0x80)
10300 res = 0x80;
10301 else
10302 res = 0x7f;
10303 }
10304 return res;
10305}
10306
10307#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
10308#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
10309#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
10310#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
10311#define PFX q
10312
10313#include "op_addsub.h"
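
Each of these #include "op_addsub.h" lines stamps out one family of packed-lane helpers (helper_qadd16(), helper_uqadd8(), and so on) from the ADD16/SUB16/ADD8/SUB8 and PFX macros defined just above it; the ARITH_GE blocks additionally record per-lane GE flags for SEL. Below is a schematic of the template-include pattern, assuming a RESULT() macro that deposits each lane into res; this is a sketch of the idea, not the actual contents of op_addsub.h:

/* Hypothetical template, expanded once per inclusion site: */
#define RESULT(val, n, width) \
    (res |= (uint32_t)(uint##width##_t)(val) << ((n) * (width)))

uint32_t HELPER(glue(PFX, add16))(uint32_t a, uint32_t b)
{
    uint32_t res = 0;
    ADD16(a, b, 0);              /* lane 0: bits [15:0]  */
    ADD16(a >> 16, b >> 16, 1);  /* lane 1: bits [31:16] */
    return res;
}
/* ARITH_GE variants also thread a GE accumulator (omitted here). */

#undef ADD16    /* ...and the other per-lane macros, ready for redefinition */
#undef PFX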
10314
10315/* Unsigned saturating arithmetic. */
460a09c1 10316static inline uint16_t add16_usat(uint16_t a, uint16_t b)
6ddbc6e4
PB
10317{
10318 uint16_t res;
10319 res = a + b;
10320 if (res < a)
10321 res = 0xffff;
10322 return res;
10323}
10324
460a09c1 10325static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
6ddbc6e4 10326{
4c4fd3f8 10327 if (a > b)
6ddbc6e4
PB
10328 return a - b;
10329 else
10330 return 0;
10331}
10332
10333static inline uint8_t add8_usat(uint8_t a, uint8_t b)
10334{
10335 uint8_t res;
10336 res = a + b;
10337 if (res < a)
10338 res = 0xff;
10339 return res;
10340}
10341
10342static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
10343{
4c4fd3f8 10344 if (a > b)
6ddbc6e4
PB
10345 return a - b;
10346 else
10347 return 0;
10348}
10349
10350#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
10351#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
10352#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
10353#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
10354#define PFX uq
10355
10356#include "op_addsub.h"
10357
10358/* Signed modulo arithmetic. */
10359#define SARITH16(a, b, n, op) do { \
10360 int32_t sum; \
db6e2e65 10361 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
6ddbc6e4
PB
10362 RESULT(sum, n, 16); \
10363 if (sum >= 0) \
10364 ge |= 3 << (n * 2); \
10365 } while (0)
10366
10367#define SARITH8(a, b, n, op) do { \
10368 int32_t sum; \
db6e2e65 10369 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
6ddbc6e4
PB
10370 RESULT(sum, n, 8); \
10371 if (sum >= 0) \
10372 ge |= 1 << n; \
10373 } while (0)
10374
10375
10376#define ADD16(a, b, n) SARITH16(a, b, n, +)
10377#define SUB16(a, b, n) SARITH16(a, b, n, -)
10378#define ADD8(a, b, n) SARITH8(a, b, n, +)
10379#define SUB8(a, b, n) SARITH8(a, b, n, -)
10380#define PFX s
10381#define ARITH_GE
10382
10383#include "op_addsub.h"
10384
10385/* Unsigned modulo arithmetic. */
10386#define ADD16(a, b, n) do { \
10387 uint32_t sum; \
10388 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
10389 RESULT(sum, n, 16); \
a87aa10b 10390 if ((sum >> 16) == 1) \
6ddbc6e4
PB
10391 ge |= 3 << (n * 2); \
10392 } while (0)
10393
10394#define ADD8(a, b, n) do { \
10395 uint32_t sum; \
10396 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
10397 RESULT(sum, n, 8); \
a87aa10b
AZ
10398 if ((sum >> 8) == 1) \
10399 ge |= 1 << n; \
6ddbc6e4
PB
10400 } while (0)
10401
10402#define SUB16(a, b, n) do { \
10403 uint32_t sum; \
10404 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
10405 RESULT(sum, n, 16); \
10406 if ((sum >> 16) == 0) \
10407 ge |= 3 << (n * 2); \
10408 } while (0)
10409
10410#define SUB8(a, b, n) do { \
10411 uint32_t sum; \
10412 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
10413 RESULT(sum, n, 8); \
10414 if ((sum >> 8) == 0) \
a87aa10b 10415 ge |= 1 << n; \
6ddbc6e4
PB
10416 } while (0)
10417
10418#define PFX u
10419#define ARITH_GE
10420
10421#include "op_addsub.h"
10422
10423/* Halved signed arithmetic. */
10424#define ADD16(a, b, n) \
10425 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
10426#define SUB16(a, b, n) \
10427 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
10428#define ADD8(a, b, n) \
10429 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
10430#define SUB8(a, b, n) \
10431 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
10432#define PFX sh
10433
10434#include "op_addsub.h"
10435
10436/* Halved unsigned arithmetic. */
10437#define ADD16(a, b, n) \
10438 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10439#define SUB16(a, b, n) \
10440 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10441#define ADD8(a, b, n) \
10442 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10443#define SUB8(a, b, n) \
10444 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10445#define PFX uh
10446
10447#include "op_addsub.h"
10448
10449static inline uint8_t do_usad(uint8_t a, uint8_t b)
10450{
10451 if (a > b)
10452 return a - b;
10453 else
10454 return b - a;
10455}
10456
10457/* Unsigned sum of absolute byte differences. */
10458uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
10459{
10460 uint32_t sum;
10461 sum = do_usad(a, b);
10462 sum += do_usad(a >> 8, b >> 8);
bdc3b6f5 10463 sum += do_usad(a >> 16, b >> 16);
6ddbc6e4
PB
10464 sum += do_usad(a >> 24, b >> 24);
10465 return sum;
10466}
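
A worked example for usad8(): the four byte lanes are compared independently and their absolute differences summed into one scalar. A hypothetical self-check:

static void example_check_usad8(void)
{
    /* Lane diffs: |10-20|, |00-ff|, |80-7f|, |ff-00| (hex)
     * = 0x10 + 0xff + 0x01 + 0xff = 0x20f. */
    assert(helper_usad8(0xff800010, 0x007fff20) == 0x20f);
}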
10467
10468/* For ARMv6 SEL instruction. */
10469uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
10470{
10471 uint32_t mask;
10472
10473 mask = 0;
10474 if (flags & 1)
10475 mask |= 0xff;
10476 if (flags & 2)
10477 mask |= 0xff00;
10478 if (flags & 4)
10479 mask |= 0xff0000;
10480 if (flags & 8)
10481 mask |= 0xff000000;
10482 return (a & mask) | (b & ~mask);
10483}
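
The flags argument holds the four GE bits produced by the parallel add/sub helpers above; each set bit selects the corresponding byte of a, otherwise the byte comes from b. A hypothetical self-check:

static void example_check_sel_flags(void)
{
    /* GE = 0b0011: bytes 0-1 come from a, bytes 2-3 from b. */
    assert(helper_sel_flags(0x3, 0x11223344, 0xaabbccdd) == 0xaabb3344);
}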
10484
aa633469
PM
10485/* CRC helpers.
10486 * The upper bytes of val (above the number specified by 'bytes') must have
10487 * been zeroed out by the caller.
10488 */
eb0ecd5a
WN
10489uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
10490{
10491 uint8_t buf[4];
10492
aa633469 10493 stl_le_p(buf, val);
eb0ecd5a
WN
10494
10495 /* zlib crc32 converts the accumulator and output to one's complement. */
10496 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
10497}
10498
10499uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
10500{
10501 uint8_t buf[4];
10502
aa633469 10503 stl_le_p(buf, val);
eb0ecd5a
WN
10504
10505 /* Linux crc32c converts the output to one's complement. */
10506 return crc32c(acc, buf, bytes) ^ 0xffffffff;
10507}
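
Because zlib's crc32() is a streaming API, wider CRC steps can be built by chaining 4-byte calls. The sketch below shows how a 64-bit accumulation could be decomposed, low half first to match little-endian byte order; this is illustrative only, and the AArch64 instructions presumably have their own 64-bit helper:

static uint32_t example_crc32_u64(uint32_t acc, uint64_t val)
{
    acc = helper_crc32(acc, (uint32_t)val, 4);
    return helper_crc32(acc, (uint32_t)(val >> 32), 4);
}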
a9e01311
RH
10508
10509/* Return the exception level to which FP-disabled exceptions should
10510 * be taken, or 0 if FP is enabled.
10511 */
ced31551 10512int fp_exception_el(CPUARMState *env, int cur_el)
a9e01311 10513{
55faa212 10514#ifndef CONFIG_USER_ONLY
d5a6fa2d
RH
10515 uint64_t hcr_el2;
10516
a9e01311
RH
10517 /* CPACR and the CPTR registers don't exist before v6, so FP is
10518 * always accessible.
10519 */
10520 if (!arm_feature(env, ARM_FEATURE_V6)) {
10521 return 0;
10522 }
10523
d87513c0
PM
10524 if (arm_feature(env, ARM_FEATURE_M)) {
10525 /* CPACR can cause a NOCP UsageFault taken to current security state */
10526 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
10527 return 1;
10528 }
10529
10530 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
10531 if (!extract32(env->v7m.nsacr, 10, 1)) {
10532 /* FP insns cause a NOCP UsageFault taken to Secure */
10533 return 3;
10534 }
10535 }
10536
10537 return 0;
10538 }
10539
d5a6fa2d
RH
10540 hcr_el2 = arm_hcr_el2_eff(env);
10541
a9e01311
RH
10542 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
10543 * 0, 2 : trap EL0 and EL1/PL1 accesses
10544 * 1 : trap only EL0 accesses
10545 * 3 : trap no accesses
c2ddb7cf 10546 * This register is ignored if E2H+TGE are both set.
a9e01311 10547 */
d5a6fa2d 10548 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
fab8ad39 10549 int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
c2ddb7cf
RH
10550
10551 switch (fpen) {
02e1de14
RH
10552 case 1:
10553 if (cur_el != 0) {
10554 break;
10555 }
10556 /* fall through */
c2ddb7cf
RH
10557 case 0:
10558 case 2:
02e1de14
RH
10559 /* Trap from Secure PL0 or PL1 to Secure PL1. */
10560 if (!arm_el_is_aa64(env, 3)
10561 && (cur_el == 3 || arm_is_secure_below_el3(env))) {
a9e01311
RH
10562 return 3;
10563 }
02e1de14 10564 if (cur_el <= 1) {
c2ddb7cf
RH
10565 return 1;
10566 }
10567 break;
a9e01311 10568 }
a9e01311
RH
10569 }
10570
fc1120a7
PM
10571 /*
10572 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
10573 * to control non-secure access to the FPU. It doesn't have any
10574 * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
10575 */
10576 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
10577 cur_el <= 2 && !arm_is_secure_below_el3(env))) {
10578 if (!extract32(env->cp15.nsacr, 10, 1)) {
10579 /* FP insns act as UNDEF */
10580 return cur_el == 2 ? 2 : 1;
10581 }
10582 }
10583
d5a6fa2d
RH
10584 /*
10585 * CPTR_EL2 is present in v7VE or v8, and changes format
10586 * with HCR_EL2.E2H (regardless of TGE).
a9e01311 10587 */
d5a6fa2d
RH
10588 if (cur_el <= 2) {
10589 if (hcr_el2 & HCR_E2H) {
fab8ad39 10590 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
d5a6fa2d
RH
10591 case 1:
10592 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
10593 break;
10594 }
10595 /* fall through */
10596 case 0:
10597 case 2:
10598 return 2;
10599 }
10600 } else if (arm_is_el2_enabled(env)) {
fab8ad39 10601 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
d5a6fa2d
RH
10602 return 2;
10603 }
10604 }
a9e01311
RH
10605 }
10606
10607 /* CPTR_EL3 : present in v8 */
fab8ad39 10608 if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
a9e01311
RH
10609 /* Trap all FP ops to EL3 */
10610 return 3;
10611 }
55faa212 10612#endif
a9e01311
RH
10613 return 0;
10614}
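
The level computed here is cached in the FPEXC_EL TB flag by rebuild_hflags_common() below, so translated code never re-walks CPACR/CPTR at run time. An illustrative statement of that invariant (an assumption about how the cache is maintained, not code from the file):

static bool example_fpexc_el_is_cached(CPUARMState *env)
{
    /* Assumed to hold whenever env->hflags is up to date. */
    return EX_TBFLAG_ANY(env->hflags, FPEXC_EL)
           == fp_exception_el(env, arm_current_el(env));
}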
10615
b9f6033c
RH
10616/* Return the exception level we're running at if this is our mmu_idx */
10617int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
10618{
10619 if (mmu_idx & ARM_MMU_IDX_M) {
10620 return mmu_idx & ARM_MMU_IDX_M_PRIV;
10621 }
10622
10623 switch (mmu_idx) {
10624 case ARMMMUIdx_E10_0:
10625 case ARMMMUIdx_E20_0:
10626 case ARMMMUIdx_SE10_0:
b6ad6062 10627 case ARMMMUIdx_SE20_0:
b9f6033c
RH
10628 return 0;
10629 case ARMMMUIdx_E10_1:
452ef8cb 10630 case ARMMMUIdx_E10_1_PAN:
b9f6033c 10631 case ARMMMUIdx_SE10_1:
452ef8cb 10632 case ARMMMUIdx_SE10_1_PAN:
b9f6033c
RH
10633 return 1;
10634 case ARMMMUIdx_E2:
10635 case ARMMMUIdx_E20_2:
452ef8cb 10636 case ARMMMUIdx_E20_2_PAN:
b6ad6062
RDC
10637 case ARMMMUIdx_SE2:
10638 case ARMMMUIdx_SE20_2:
10639 case ARMMMUIdx_SE20_2_PAN:
b9f6033c
RH
10640 return 2;
10641 case ARMMMUIdx_SE3:
10642 return 3;
10643 default:
10644 g_assert_not_reached();
10645 }
10646}
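
A couple of concrete data points for the mapping above, as hypothetical self-checks:

static void example_check_mmu_idx_to_el(void)
{
    assert(arm_mmu_idx_to_el(ARMMMUIdx_E20_0) == 0);     /* EL2&0 regime */
    assert(arm_mmu_idx_to_el(ARMMMUIdx_E20_2_PAN) == 2); /* PAN variant */
}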
10647
7aab5a8c 10648#ifndef CONFIG_TCG
65e4655c
RH
10649ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
10650{
7aab5a8c 10651 g_assert_not_reached();
65e4655c 10652}
7aab5a8c 10653#endif
65e4655c 10654
164690b2 10655ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
65e4655c 10656{
b6ad6062
RDC
10657 ARMMMUIdx idx;
10658 uint64_t hcr;
10659
65e4655c 10660 if (arm_feature(env, ARM_FEATURE_M)) {
50494a27 10661 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
65e4655c
RH
10662 }
10663
6003d980 10664 /* See ARM pseudo-function ELIsInHost. */
b9f6033c
RH
10665 switch (el) {
10666 case 0:
b6ad6062
RDC
10667 hcr = arm_hcr_el2_eff(env);
10668 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
10669 idx = ARMMMUIdx_E20_0;
10670 } else {
10671 idx = ARMMMUIdx_E10_0;
6003d980 10672 }
b6ad6062 10673 break;
b9f6033c 10674 case 1:
66412260 10675 if (env->pstate & PSTATE_PAN) {
b6ad6062
RDC
10676 idx = ARMMMUIdx_E10_1_PAN;
10677 } else {
10678 idx = ARMMMUIdx_E10_1;
66412260 10679 }
b6ad6062 10680 break;
b9f6033c 10681 case 2:
6003d980 10682 /* Note that TGE does not apply at EL2. */
b6ad6062 10683 if (arm_hcr_el2_eff(env) & HCR_E2H) {
66412260 10684 if (env->pstate & PSTATE_PAN) {
b6ad6062
RDC
10685 idx = ARMMMUIdx_E20_2_PAN;
10686 } else {
10687 idx = ARMMMUIdx_E20_2;
66412260 10688 }
b6ad6062
RDC
10689 } else {
10690 idx = ARMMMUIdx_E2;
6003d980 10691 }
b6ad6062 10692 break;
b9f6033c
RH
10693 case 3:
10694 return ARMMMUIdx_SE3;
10695 default:
10696 g_assert_not_reached();
65e4655c 10697 }
b6ad6062
RDC
10698
10699 if (arm_is_secure_below_el3(env)) {
10700 idx &= ~ARM_MMU_IDX_A_NS;
10701 }
10702
10703 return idx;
50494a27
RH
10704}
10705
164690b2
RH
10706ARMMMUIdx arm_mmu_idx(CPUARMState *env)
10707{
10708 return arm_mmu_idx_el(env, arm_current_el(env));
10709}
10710
3902bfc6
RH
10711static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
10712 ARMMMUIdx mmu_idx,
10713 CPUARMTBFlags flags)
fdd1b228 10714{
a729a46b
RH
10715 DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
10716 DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
fdd1b228 10717
fdd1b228 10718 if (arm_singlestep_active(env)) {
a729a46b 10719 DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
fdd1b228
RH
10720 }
10721 return flags;
10722}
10723
3902bfc6
RH
10724static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
10725 ARMMMUIdx mmu_idx,
10726 CPUARMTBFlags flags)
43eccfb6 10727{
8061a649
RH
10728 bool sctlr_b = arm_sctlr_b(env);
10729
10730 if (sctlr_b) {
a729a46b 10731 DP_TBFLAG_A32(flags, SCTLR__B, 1);
8061a649
RH
10732 }
10733 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
a729a46b 10734 DP_TBFLAG_ANY(flags, BE_DATA, 1);
8061a649 10735 }
a729a46b 10736 DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));
43eccfb6
RH
10737
10738 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
10739}
10740
3902bfc6
RH
10741static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
10742 ARMMMUIdx mmu_idx)
6e33ced5 10743{
3902bfc6 10744 CPUARMTBFlags flags = {};
4479ec30
RH
10745 uint32_t ccr = env->v7m.ccr[env->v7m.secure];
10746
10747 /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
10748 if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
10749 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
10750 }
6e33ced5
RH
10751
10752 if (arm_v7m_is_handler_mode(env)) {
a729a46b 10753 DP_TBFLAG_M32(flags, HANDLER, 1);
6e33ced5
RH
10754 }
10755
10756 /*
10757 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
10758 * is suppressing them because the requested execution priority
10759 * is less than 0.
10760 */
10761 if (arm_feature(env, ARM_FEATURE_V8) &&
10762 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
4479ec30 10763 (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
a729a46b 10764 DP_TBFLAG_M32(flags, STACKCHECK, 1);
6e33ced5
RH
10765 }
10766
10767 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
10768}
10769
3902bfc6
RH
10770static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
10771 ARMMMUIdx mmu_idx)
c747224c 10772{
8480e933 10773 CPUARMTBFlags flags = {};
4479ec30
RH
10774 int el = arm_current_el(env);
10775
10776 if (arm_sctlr(env, el) & SCTLR_A) {
10777 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
10778 }
0a54d68e
RH
10779
10780 if (arm_el_is_aa64(env, 1)) {
a729a46b 10781 DP_TBFLAG_A32(flags, VFPEN, 1);
0a54d68e 10782 }
5bb0a20b 10783
4479ec30 10784 if (el < 2 && env->cp15.hstr_el2 &&
5bb0a20b 10785 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
a729a46b 10786 DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
5bb0a20b
MZ
10787 }
10788
520d1621
PM
10789 if (env->uncached_cpsr & CPSR_IL) {
10790 DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
10791 }
10792
75fe8356
RH
10793 /*
10794 * The SME exception we are testing for is raised via
10795 * AArch64.CheckFPAdvSIMDEnabled(), as called from
10796 * AArch32.CheckAdvSIMDOrFPEnabled().
10797 */
10798 if (el == 0
10799 && FIELD_EX64(env->svcr, SVCR, SM)
10800 && (!arm_is_el2_enabled(env)
10801 || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE)))
10802 && arm_el_is_aa64(env, 1)
10803 && !sme_fa64(env, el)) {
10804 DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
10805 }
10806
83f4baef 10807 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
c747224c
RH
10808}
10809
3902bfc6
RH
10810static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
10811 ARMMMUIdx mmu_idx)
a9e01311 10812{
8480e933 10813 CPUARMTBFlags flags = {};
d4d7503a 10814 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
c1547bba 10815 uint64_t tcr = regime_tcr(env, mmu_idx);
d4d7503a
RH
10816 uint64_t sctlr;
10817 int tbii, tbid;
b9adaa70 10818
a729a46b 10819 DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);
cd208a1c 10820
339370b9 10821 /* Get control bits for tagged addresses. */
b830a5ee
RH
10822 tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
10823 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
5d8634f5 10824
a729a46b
RH
10825 DP_TBFLAG_A64(flags, TBII, tbii);
10826 DP_TBFLAG_A64(flags, TBID, tbid);
d4d7503a
RH
10827
10828 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
10829 int sve_el = sve_exception_el(env, el);
5d8634f5 10830
d4d7503a 10831 /*
397d922c
RH
10832 * If either FP or SVE are disabled, translator does not need len.
10833 * If SVE EL > FP EL, FP exception has precedence, and translator
10834 * does not need SVE EL. Save potential re-translations by forcing
10835 * the unneeded data to zero.
d4d7503a 10836 */
397d922c
RH
10837 if (fp_el != 0) {
10838 if (sve_el > fp_el) {
10839 sve_el = 0;
10840 }
10841 } else if (sve_el == 0) {
5ef3cc56 10842 DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
5d8634f5 10843 }
a729a46b 10844 DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
d4d7503a 10845 }
6b2ca83e 10846 if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
5d7953ad 10847 int sme_el = sme_exception_el(env, el);
62151133 10848 bool sm = FIELD_EX64(env->svcr, SVCR, SM);
5d7953ad
RH
10849
10850 DP_TBFLAG_A64(flags, SMEEXC_EL, sme_el);
10851 if (sme_el == 0) {
10852 /* Similarly, do not compute SVL if SME is disabled. */
62151133
RH
10853 int svl = sve_vqm1_for_el_sm(env, el, true);
10854 DP_TBFLAG_A64(flags, SVL, svl);
10855 if (sm) {
10856 /* If SVE is disabled, we will not have set VL above. */
10857 DP_TBFLAG_A64(flags, VL, svl);
10858 }
5d7953ad 10859 }
62151133 10860 if (sm) {
a3637e88 10861 DP_TBFLAG_A64(flags, PSTATE_SM, 1);
75fe8356 10862 DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
a3637e88
RH
10863 }
10864 DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
6b2ca83e 10865 }
1db5e96c 10866
aaec1432 10867 sctlr = regime_sctlr(env, stage1);
1db5e96c 10868
4479ec30
RH
10869 if (sctlr & SCTLR_A) {
10870 DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
10871 }
10872
8061a649 10873 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
a729a46b 10874 DP_TBFLAG_ANY(flags, BE_DATA, 1);
8061a649
RH
10875 }
10876
d4d7503a
RH
10877 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
10878 /*
10879 * In order to save space in flags, we record only whether
10880 * pauth is "inactive", meaning all insns are implemented as
10881 * a nop, or "active" when some action must be performed.
10882 * The decision of which action to take is left to a helper.
10883 */
10884 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
a729a46b 10885 DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
1db5e96c 10886 }
d4d7503a 10887 }
0816ef1b 10888
d4d7503a
RH
10889 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
10890 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
10891 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
a729a46b 10892 DP_TBFLAG_A64(flags, BT, 1);
0816ef1b 10893 }
d4d7503a 10894 }
08f1434a 10895
cc28fc30 10896 /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
7a8014ab
RH
10897 if (!(env->pstate & PSTATE_UAO)) {
10898 switch (mmu_idx) {
10899 case ARMMMUIdx_E10_1:
10900 case ARMMMUIdx_E10_1_PAN:
10901 case ARMMMUIdx_SE10_1:
10902 case ARMMMUIdx_SE10_1_PAN:
10903 /* TODO: ARMv8.3-NV */
a729a46b 10904 DP_TBFLAG_A64(flags, UNPRIV, 1);
7a8014ab
RH
10905 break;
10906 case ARMMMUIdx_E20_2:
10907 case ARMMMUIdx_E20_2_PAN:
b6ad6062
RDC
10908 case ARMMMUIdx_SE20_2:
10909 case ARMMMUIdx_SE20_2_PAN:
7a8014ab
RH
10910 /*
10911 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
10912 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
10913 */
10914 if (env->cp15.hcr_el2 & HCR_TGE) {
a729a46b 10915 DP_TBFLAG_A64(flags, UNPRIV, 1);
7a8014ab
RH
10916 }
10917 break;
10918 default:
10919 break;
cc28fc30 10920 }
cc28fc30
RH
10921 }
10922
520d1621
PM
10923 if (env->pstate & PSTATE_IL) {
10924 DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
10925 }
10926
81ae05fa
RH
10927 if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
10928 /*
10929 * Set MTE_ACTIVE if any access may be Checked, and leave clear
10930 * if all accesses must be Unchecked:
10931 * 1) If no TBI, then there are no tags in the address to check,
10932 * 2) If Tag Check Override, then all accesses are Unchecked,
10933 * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
10934 * 4) If no Allocation Tag Access, then all accesses are Unchecked.
10935 */
10936 if (allocation_tag_access_enabled(env, el, sctlr)) {
a729a46b 10937 DP_TBFLAG_A64(flags, ATA, 1);
81ae05fa
RH
10938 if (tbid
10939 && !(env->pstate & PSTATE_TCO)
10940 && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
a729a46b 10941 DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
81ae05fa
RH
10942 }
10943 }
10944 /* And again for unprivileged accesses, if required. */
a729a46b 10945 if (EX_TBFLAG_A64(flags, UNPRIV)
81ae05fa
RH
10946 && tbid
10947 && !(env->pstate & PSTATE_TCO)
2d928adf 10948 && (sctlr & SCTLR_TCF0)
81ae05fa 10949 && allocation_tag_access_enabled(env, 0, sctlr)) {
a729a46b 10950 DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
81ae05fa
RH
10951 }
10952 /* Cache TCMA as well as TBI. */
a729a46b 10953 DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
81ae05fa
RH
10954 }
10955
d4d7503a
RH
10956 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
10957}
10958
3902bfc6 10959static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
3d74e2e9
RH
10960{
10961 int el = arm_current_el(env);
10962 int fp_el = fp_exception_el(env, el);
164690b2 10963 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
3d74e2e9
RH
10964
10965 if (is_a64(env)) {
10966 return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
10967 } else if (arm_feature(env, ARM_FEATURE_M)) {
10968 return rebuild_hflags_m32(env, fp_el, mmu_idx);
10969 } else {
10970 return rebuild_hflags_a32(env, fp_el, mmu_idx);
10971 }
10972}
10973
10974void arm_rebuild_hflags(CPUARMState *env)
10975{
10976 env->hflags = rebuild_hflags_internal(env);
10977}
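
The contract implied here: any state change that feeds the cached flags (system register writes, mode changes) must be followed by a rebuild before the next TB lookup. A hedged sketch of a caller honoring that contract (illustrative; real writers go through the cpreg writefn machinery):

static void example_write_sctlr_el1(CPUARMState *env, uint64_t val)
{
    env->cp15.sctlr_el[1] = val;  /* e.g. flips SCTLR_A or data endianness */
    arm_rebuild_hflags(env);      /* keep env->hflags coherent */
}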
10978
19717e9b
PM
10979/*
10980 * If we have triggered an EL state change we can't rely on the
10981 * translator having passed it to us; we need to recompute.
10982 */
10983void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
10984{
10985 int el = arm_current_el(env);
10986 int fp_el = fp_exception_el(env, el);
10987 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
3902bfc6 10988
19717e9b
PM
10989 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
10990}
10991
14f3c588
RH
10992void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
10993{
10994 int fp_el = fp_exception_el(env, el);
10995 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
10996
10997 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
10998}
10999
f80741d1
AB
11000/*
11001 * If we have triggered an EL state change we can't rely on the
563152e0 11002 * translator having passed it to us; we need to recompute.
f80741d1
AB
11003 */
11004void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
11005{
11006 int el = arm_current_el(env);
11007 int fp_el = fp_exception_el(env, el);
11008 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11009 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
11010}
11011
14f3c588
RH
11012void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
11013{
11014 int fp_el = fp_exception_el(env, el);
11015 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11016
11017 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
11018}
11019
11020void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
11021{
11022 int fp_el = fp_exception_el(env, el);
11023 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
11024
11025 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
11026}
11027
0ee8b24a
PMD
11028static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
11029{
11030#ifdef CONFIG_DEBUG_TCG
3902bfc6
RH
11031 CPUARMTBFlags c = env->hflags;
11032 CPUARMTBFlags r = rebuild_hflags_internal(env);
0ee8b24a 11033
a378206a
RH
11034 if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
11035 fprintf(stderr, "TCG hflags mismatch "
11036 "(current:(0x%08x,0x" TARGET_FMT_lx ")"
11037 " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
11038 c.flags, c.flags2, r.flags, r.flags2);
0ee8b24a
PMD
11039 abort();
11040 }
11041#endif
11042}
11043
26702213
PM
11044static bool mve_no_pred(CPUARMState *env)
11045{
11046 /*
11047 * Return true if there is definitely no predication of MVE
11048 * instructions by VPR or LTPSIZE. (Returning false even if there
11049 * isn't any predication is OK; generated code will just be
11050 * a little worse.)
11051 * If the CPU does not implement MVE then this TB flag is always 0.
11052 *
11053 * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
11054 * logic in gen_update_fp_context() needs to be updated to match.
11055 *
11056 * We do not include the effect of the ECI bits here -- they are
11057 * tracked in other TB flags. This simplifies the logic for
11058 * "when did we emit code that changes the MVE_NO_PRED TB flag
11059 * and thus need to end the TB?".
11060 */
11061 if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
11062 return false;
11063 }
11064 if (env->v7m.vpr) {
11065 return false;
11066 }
11067 if (env->v7m.ltpsize < 4) {
11068 return false;
11069 }
11070 return true;
11071}
11072
d4d7503a
RH
11073void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
11074 target_ulong *cs_base, uint32_t *pflags)
11075{
3902bfc6 11076 CPUARMTBFlags flags;
d4d7503a 11077
0ee8b24a 11078 assert_hflags_rebuild_correctly(env);
3902bfc6 11079 flags = env->hflags;
3d74e2e9 11080
a729a46b 11081 if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
d4d7503a 11082 *pc = env->pc;
d4d7503a 11083 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
a729a46b 11084 DP_TBFLAG_A64(flags, BTYPE, env->btype);
08f1434a 11085 }
a9e01311
RH
11086 } else {
11087 *pc = env->regs[15];
6e33ced5
RH
11088
11089 if (arm_feature(env, ARM_FEATURE_M)) {
9550d1bd
RH
11090 if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11091 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
11092 != env->v7m.secure) {
a729a46b 11093 DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
9550d1bd
RH
11094 }
11095
11096 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
11097 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
11098 (env->v7m.secure &&
11099 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
11100 /*
11101 * ASPEN is set, but FPCA/SFPA indicate that there is no
11102 * active FP context; we must create a new FP context before
11103 * executing any FP insn.
11104 */
a729a46b 11105 DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
9550d1bd
RH
11106 }
11107
11108 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
11109 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
a729a46b 11110 DP_TBFLAG_M32(flags, LSPACT, 1);
9550d1bd 11111 }
26702213
PM
11112
11113 if (mve_no_pred(env)) {
11114 DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
11115 }
6e33ced5 11116 } else {
bbad7c62
RH
11117 /*
11118 * Note that XSCALE_CPAR shares bits with VECSTRIDE.
11119 * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
11120 */
11121 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
a729a46b 11122 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
bbad7c62 11123 } else {
a729a46b
RH
11124 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
11125 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
bbad7c62 11126 }
0a54d68e 11127 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
a729a46b 11128 DP_TBFLAG_A32(flags, VFPEN, 1);
0a54d68e 11129 }
6e33ced5
RH
11130 }
11131
a729a46b
RH
11132 DP_TBFLAG_AM32(flags, THUMB, env->thumb);
11133 DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
d4d7503a 11134 }
a9e01311 11135
60e12c37
RH
11136 /*
11137 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
a9e01311
RH
11138 * states defined in the ARM ARM for software singlestep:
11139 * SS_ACTIVE PSTATE.SS State
11140 * 0 x Inactive (the TB flag for SS is always 0)
11141 * 1 0 Active-pending
11142 * 1 1 Active-not-pending
ae6eb1e9 11143 * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
a9e01311 11144 */
a729a46b
RH
11145 if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
11146 DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
a9e01311 11147 }
a9e01311 11148
3902bfc6 11149 *pflags = flags.flags;
a378206a 11150 *cs_base = flags.flags2;
a9e01311 11151}
0ab5953b
RH
11152
11153#ifdef TARGET_AARCH64
11154/*
11155 * The manual says that when SVE is enabled and VQ is widened the
11156 * implementation is allowed to zero the previously inaccessible
11157 * portion of the registers. The corollary to that is that when
11158 * SVE is enabled and VQ is narrowed we are also allowed to zero
11159 * the now inaccessible portion of the registers.
11160 *
11161 * The intent of this is that no predicate bit beyond VQ is ever set.
11162 * Which means that some operations on predicate registers themselves
11163 * may operate on full uint64_t or even unrolled across the maximum
11164 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally
11165 * may well be cheaper than conditionals to restrict the operation
11166 * to the relevant portion of a uint16_t[16].
11167 */
11168void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
11169{
11170 int i, j;
11171 uint64_t pmask;
11172
11173 assert(vq >= 1 && vq <= ARM_MAX_VQ);
2fc0cc0e 11174 assert(vq <= env_archcpu(env)->sve_max_vq);
0ab5953b
RH
11175
11176 /* Zap the high bits of the zregs. */
11177 for (i = 0; i < 32; i++) {
11178 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
11179 }
11180
11181 /* Zap the high bits of the pregs and ffr. */
11182 pmask = 0;
11183 if (vq & 3) {
11184 pmask = ~(-1ULL << (16 * (vq & 3)));
11185 }
11186 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
11187 for (i = 0; i < 17; ++i) {
11188 env->vfp.pregs[i].p[j] &= pmask;
11189 }
11190 pmask = 0;
11191 }
11192}
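
The pmask arithmetic deserves a worked example: each uint64_t of a predicate register covers four vector quanta of 16 bits each, so only the word at index vq/4 can be partially live. A hypothetical re-derivation of the per-word mask applied above:

static uint64_t example_preg_mask(unsigned vq, unsigned j)
{
    if (j < vq / 4) {
        return ~0ULL;                        /* word fully live */
    } else if (j == vq / 4 && (vq & 3)) {
        return ~(-1ULL << (16 * (vq & 3)));  /* partially live word */
    }
    return 0;                                /* word fully cleared */
}
/* e.g. vq == 6: p[0] untouched, p[1] &= 0xffffffff, p[2..3] zeroed. */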
11193
6a775fd6
RH
11194static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
11195{
11196 int exc_el;
11197
11198 if (sm) {
11199 exc_el = sme_exception_el(env, el);
11200 } else {
11201 exc_el = sve_exception_el(env, el);
11202 }
11203 if (exc_el) {
11204 return 0; /* disabled */
11205 }
11206 return sve_vqm1_for_el_sm(env, el, sm);
11207}
11208
0ab5953b
RH
11209/*
11210 * Notice a change in SVE vector size when changing EL.
11211 */
9a05f7b6
RH
11212void aarch64_sve_change_el(CPUARMState *env, int old_el,
11213 int new_el, bool el0_a64)
0ab5953b 11214{
2fc0cc0e 11215 ARMCPU *cpu = env_archcpu(env);
0ab5953b 11216 int old_len, new_len;
6a775fd6 11217 bool old_a64, new_a64, sm;
0ab5953b
RH
11218
11219 /* Nothing to do if no SVE. */
cd208a1c 11220 if (!cpu_isar_feature(aa64_sve, cpu)) {
0ab5953b
RH
11221 return;
11222 }
11223
11224 /* Nothing to do if FP is disabled in either EL. */
11225 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
11226 return;
11227 }
11228
04fbce76
RH
11229 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
11230 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
11231
11232 /*
11233 * Both AArch64.TakeException and AArch64.ExceptionReturn
11234 * invoke ResetSVEState when taking an exception from, or
11235 * returning to, AArch32 state when PSTATE.SM is enabled.
11236 */
6a775fd6
RH
11237 sm = FIELD_EX64(env->svcr, SVCR, SM);
11238 if (old_a64 != new_a64 && sm) {
04fbce76
RH
11239 arm_reset_sve_state(env);
11240 return;
11241 }
11242
0ab5953b
RH
11243 /*
11244 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
11245 * at ELx, or not available because the EL is in AArch32 state, then
11246 * for all purposes other than a direct read, the ZCR_ELx.LEN field
11247 * has an effective value of 0".
11248 *
11249 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
11250 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
11251 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
11252 * we already have the correct register contents when encountering the
11253 * vq0->vq0 transition between EL0->EL1.
11254 */
6a775fd6
RH
11255 old_len = new_len = 0;
11256 if (old_a64) {
11257 old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
11258 }
11259 if (new_a64) {
11260 new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
11261 }
0ab5953b
RH
11262
11263 /* When changing vector length, clear inaccessible state. */
11264 if (new_len < old_len) {
11265 aarch64_sve_narrow_vq(env, new_len + 1);
11266 }
11267}
11268#endif