/* mirror_qemu.git: target/arm/helper.c */

/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

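/*
 * Worked example of the gdb register layout exposed by the functions
 * above and below, assuming a VFP3 core with NEON (so nregs reaches 48):
 * regs 0..31 are D0..D31 (8 bytes each, little-endian), regs 32..47 are
 * the Q0..Q15 aliases (16 bytes each), then reg 48 is FPSID, reg 49 is
 * FPSCR and reg 50 is FPEXC (4 bytes each); anything else returns 0.
 */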
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

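/*
 * The two g_list_foreach() passes above are the usual count-then-fill
 * pattern: count_cpreg() sizes the arrays, add_cpreg_to_list() then fills
 * cpreg_indexes[], reusing cpreg_array_len as the write cursor; that is
 * why it is reset to 0 in between and re-checked against arraylen after.
 */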
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}

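/*
 * Illustrative consequence for the helpers below (a sketch of the intended
 * behaviour, not additional code): a TLBIALL executed at NS EL1 while
 * HCR_EL2.FB is set behaves like TLBIALLIS, i.e. tlbiall_write() takes the
 * tlb_flush_all_cpus_synced() path rather than flushing only this CPU.
 */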
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

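/*
 * Worked example for the pageaddr computation above: TLBIIPAS2 carries
 * IPA[39:12] in value[27:0], so for value == 0x12345 the shift yields
 * 0x12345000 and sextract64(..., 0, 40) returns that 40-bit address
 * unchanged; the sign extension only matters once bit 39 is set.
 */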
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

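/*
 * Worked example, assuming PMCR.N == 4 (four event counters): c9_pmcr
 * carries 4 in its N field, so pmu_num_counters() returns 4 and
 * pmu_counter_mask() returns (1 << 31) | 0xf == 0x8000000f, i.e. bit 31
 * for the cycle counter plus one bit per implemented event counter.
 */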
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're
 * in user-mode emulation, return the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    }
#endif
};

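/*
 * A hypothetical additional entry (event number 0x023 here is only an
 * example) would follow the same shape: a .supported predicate, a
 * .get_count callback returning the running total of the underlying event,
 * and a .ns_per_count callback (or the return-negative convention of
 * swinc_ns_per) so that pmevcntr_op_finish() can schedule the overflow
 * timer.
 */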
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

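/*
 * Worked example of the PMCEID mapping above: event 0x011 (CPU_CYCLES) has
 * bit 5 (0x20) clear, so it sets bit (0x11 & 0x1f) == bit 17 of pmceid0;
 * an event in the 0x20..0x3f range would land in pmceid1 instead.
 */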
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is set up to an event
         * we support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

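/*
 * Worked example of the filter logic above: at non-secure EL0, a counter
 * whose PMEVTYPER has U == 1 and NSU == 0 is filtered (u != nsu), while
 * U == 1 with NSU == 1 counts; in secure state only the U bit matters.
 */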
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

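/*
 * Worked example of the delta scheme above (PMCR.D and PMCR.LC clear): if
 * the guest wrote PMCCNTR = 0 while the underlying count stood at 1000
 * cycles, c15_ccnt_delta holds 1000, and a later pmccntr_op_start() at
 * 1500 cycles computes 1500 - 1000 = 500 as the guest-visible count. The
 * expression old & ~new & overflow_mask detects a 1->0 transition of the
 * top bit, i.e. a carry out of bit 31 (or bit 63 when PMCRLC is set).
 */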
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}

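/*
 * 0x39 above is PMCRDP | PMCRX | PMCRD | PMCRE (bits 5, 4, 3 and 0):
 * writes preserve everything else, in particular the read-only PMCR.N
 * field, while the C and P bits act as write-one-to-reset controls that
 * are handled separately before being masked out.
 */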
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

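/*
 * deposit64(cur_val, 0, 32, value) above replaces only bits [31:0]: for
 * cur_val == 0x0123456789abcdef and value == 0 it stores
 * 0x0123456700000000, matching the 32-bit AArch32 view of PMCCNTR.
 */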
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

5ecdd3e4
AL
1665static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1666 uint64_t value, const uint8_t counter)
200ac0ef 1667{
5ecdd3e4
AL
1668 if (counter == 31) {
1669 pmccfiltr_write(env, ri, value);
1670 } else if (counter < pmu_num_counters(env)) {
1671 pmevcntr_op_start(env, counter);
1672
1673 /*
1674 * If this counter's event type is changing, store the current
1675 * underlying count for the new type in c14_pmevcntr_delta[counter] so
1676 * pmevcntr_op_finish has the correct baseline when it converts back to
1677 * a delta.
1678 */
1679 uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1680 PMXEVTYPER_EVTCOUNT;
1681 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1682 if (old_event != new_event) {
1683 uint64_t count = 0;
1684 if (event_supported(new_event)) {
1685 uint16_t event_idx = supported_event_map[new_event];
1686 count = pm_events[event_idx].get_count(env);
1687 }
1688 env->cp15.c14_pmevcntr_delta[counter] = count;
1689 }
1690
1691 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1692 pmevcntr_op_finish(env, counter);
1693 }
fdb86656
WH
1694 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1695 * PMSELR value is equal to or greater than the number of implemented
1696 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1697 */
5ecdd3e4
AL
1698}
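
/*
 * Sketch of the delta bookkeeping assumed above (illustrative only; it
 * mirrors the pmevcntr_op_start()/pmevcntr_op_finish() pairing used
 * elsewhere in this file): while a counter runs, the architecturally
 * visible count is reconstructed as the event's free-running underlying
 * count minus the stored delta, which is why a change of event type must
 * rebase the delta onto the new event's current underlying count.
 */
static inline uint64_t example_visible_event_count(uint64_t underlying,
                                                   uint64_t delta)
{
    /* unsigned 64-bit arithmetic, wraps modulo 2^64 as required */
    return underlying - delta;
}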

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
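
/*
 * Worked example (illustrative): the PMEVCNTRn/PMEVTYPERn encodings pack
 * the counter index n into CRm[1:0] and opc2[2:0], decoded above as
 * ((crm & 3) << 3) | (opc2 & 7). For PMEVCNTR13_EL0, crm is 0b1001 and
 * opc2 is 0b101, giving ((1 << 3) | 5) == 13.
 */
static inline uint8_t example_pmevcntr_index(uint8_t crm, uint8_t opc2)
{
    return ((crm & 3) << 3) | (opc2 & 7);
}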

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;
    bool allow_virt = (arm_current_el(env) == 1 &&
                       (!arm_is_secure_below_el3(env) ||
                        (env->cp15.scr_el3 & SCR_EEL2)));

    if (allow_virt && (hcr_el2 & HCR_IMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (allow_virt && (hcr_el2 & HCR_FMO)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
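
/*
 * Illustrative restatement of the routing above (a sketch, not a helper
 * the code calls): when running at EL1 with virtualization in effect and
 * HCR_EL2.IMO set, ISR_EL1.I reflects the *virtual* IRQ line; otherwise it
 * reflects the physical one. The FMO/FIQ pair has the same shape.
 */
static inline bool example_isr_i_bit(bool allow_virt, bool imo,
                                     bool virq_pending, bool irq_pending)
{
    return (allow_virt && imo) ? virq_pending : irq_pending;
}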

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                          cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
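
/*
 * Worked example (illustrative) of the unsigned comparison above: with a
 * virtual-timer offset the effective count is (count - offset) modulo
 * 2^64, so ISTATUS is set once that difference reaches CVAL even when the
 * subtraction wraps. For count = 5, offset = 10, cval = UINT64_MAX - 4:
 *   count - offset == UINT64_MAX - 4  =>  istatus == 1
 */
static inline int example_gt_istatus(uint64_t count, uint64_t offset,
                                     uint64_t cval)
{
    return count - offset >= cval;
}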

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
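
/*
 * Worked example (illustrative) of the TVAL view implemented above: TVAL
 * reads as the signed 32-bit distance to CVAL, and writing TVAL sets
 * CVAL = now + sign-extended value. E.g. with an effective count of 1000,
 * writing TVAL = 100 yields CVAL = 1100, after which TVAL reads back as
 * 100 and counts down as the counter advances.
 */
static inline uint64_t example_cval_from_tval(uint64_t now_minus_offset,
                                              uint32_t tval)
{
    return now_minus_offset + sextract64(tval, 0, 32);
}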

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
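
/*
 * Illustrative sketch of the CNT*_CTL bit layout assumed above (matching
 * the handling in gt_ctl_write() and gt_recalc_timer()): bit 0 is ENABLE,
 * bit 1 is IMASK, and bit 2 is the read-only ISTATUS. The interrupt line
 * is simply ISTATUS && !IMASK.
 */
static inline int example_gt_irqstate(uint32_t ctl)
{
    return ((ctl & 4) != 0) && !(ctl & 2);
}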

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env); instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}
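
/*
 * Worked example (illustrative): gt_cntfrq_period_ns() turns the counter
 * frequency into a tick period, so dividing a nanosecond clock by it
 * yields the counter value. At the 62.5 MHz frequency implied by
 * GTIMER_SCALE the period is 16 ns, so one second of clock reads as
 * 62500000 ticks (matching the CNTFRQ_EL0 reset value below).
 */
static inline uint64_t example_ticks_from_ns(uint64_t ns, uint64_t period_ns)
{
    return ns / period_ns;
}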

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
3016
060e8a48 3017static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
03ae85f8 3018 MMUAccessType access_type, ARMMMUIdx mmu_idx)
4a501606 3019{
a8170e5e 3020 hwaddr phys_addr;
4a501606
PM
3021 target_ulong page_size;
3022 int prot;
b7cc4e82 3023 bool ret;
01c097f7 3024 uint64_t par64;
1313e2d7 3025 bool format64 = false;
8bf5b6a9 3026 MemTxAttrs attrs = {};
e14b5a23 3027 ARMMMUFaultInfo fi = {};
5b2d261d 3028 ARMCacheAttrs cacheattrs = {};
4a501606 3029
5b2d261d 3030 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
bc52bfeb 3031 &prot, &page_size, &fi, &cacheattrs);
1313e2d7 3032
0710b2fa
PM
3033 if (ret) {
3034 /*
3035 * Some kinds of translation fault must cause exceptions rather
3036 * than being reported in the PAR.
3037 */
3038 int current_el = arm_current_el(env);
3039 int target_el;
3040 uint32_t syn, fsr, fsc;
3041 bool take_exc = false;
3042
3043 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
3044 && (mmu_idx == ARMMMUIdx_S1NSE1 || mmu_idx == ARMMMUIdx_S1NSE0)) {
3045 /*
3046 * Synchronous stage 2 fault on an access made as part of the
3047 * translation table walk for AT S1E0* or AT S1E1* insn
3048 * executed from NS EL1. If this is a synchronous external abort
3049 * and SCR_EL3.EA == 1, then we take a synchronous external abort
3050 * to EL3. Otherwise the fault is taken as an exception to EL2,
3051 * and HPFAR_EL2 holds the faulting IPA.
3052 */
3053 if (fi.type == ARMFault_SyncExternalOnWalk &&
3054 (env->cp15.scr_el3 & SCR_EA)) {
3055 target_el = 3;
3056 } else {
3057 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3058 target_el = 2;
3059 }
3060 take_exc = true;
3061 } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3062 /*
3063 * Synchronous external aborts during a translation table walk
3064 * are taken as Data Abort exceptions.
3065 */
3066 if (fi.stage2) {
3067 if (current_el == 3) {
3068 target_el = 3;
3069 } else {
3070 target_el = 2;
3071 }
3072 } else {
3073 target_el = exception_target_el(env);
3074 }
3075 take_exc = true;
3076 }
3077
3078 if (take_exc) {
3079 /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3080 if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3081 arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3082 fsr = arm_fi_to_lfsc(&fi);
3083 fsc = extract32(fsr, 0, 6);
3084 } else {
3085 fsr = arm_fi_to_sfsc(&fi);
3086 fsc = 0x3f;
3087 }
3088 /*
3089 * Report exception with ESR indicating a fault due to a
3090 * translation table walk for a cache maintenance instruction.
3091 */
3092 syn = syn_data_abort_no_iss(current_el == target_el,
3093 fi.ea, 1, fi.s1ptw, 1, fsc);
3094 env->exception.vaddress = value;
3095 env->exception.fsr = fsr;
3096 raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3097 }
3098 }
3099
1313e2d7
EI
3100 if (is_a64(env)) {
3101 format64 = true;
3102 } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3103 /*
3104 * ATS1Cxx:
3105 * * TTBCR.EAE determines whether the result is returned using the
3106 * 32-bit or the 64-bit PAR format
3107 * * Instructions executed in Hyp mode always use the 64bit format
3108 *
3109 * ATS12NSOxx uses the 64bit format if any of the following is true:
3110 * * The Non-secure TTBCR.EAE bit is set to 1
3111 * * The implementation includes EL2, and the value of HCR.VM is 1
3112 *
9d1bab33
PM
3113 * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3114 *
23463e0e 3115 * ATS1Hx always uses the 64bit format.
1313e2d7
EI
3116 */
3117 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3118
3119 if (arm_feature(env, ARM_FEATURE_EL2)) {
3120 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
9d1bab33 3121 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
1313e2d7
EI
3122 } else {
3123 format64 |= arm_current_el(env) == 2;
3124 }
3125 }
3126 }
3127
3128 if (format64) {
5efe9ed4 3129 /* Create a 64-bit PAR */
01c097f7 3130 par64 = (1 << 11); /* LPAE bit always set */
b7cc4e82 3131 if (!ret) {
702a9357 3132 par64 |= phys_addr & ~0xfffULL;
8bf5b6a9
PM
3133 if (!attrs.secure) {
3134 par64 |= (1 << 9); /* NS */
3135 }
5b2d261d
AB
3136 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3137 par64 |= cacheattrs.shareability << 7; /* SH */
4a501606 3138 } else {
5efe9ed4
PM
3139 uint32_t fsr = arm_fi_to_lfsc(&fi);
3140
702a9357 3141 par64 |= 1; /* F */
b7cc4e82 3142 par64 |= (fsr & 0x3f) << 1; /* FS */
0f7b791b
PM
3143 if (fi.stage2) {
3144 par64 |= (1 << 9); /* S */
3145 }
3146 if (fi.s1ptw) {
3147 par64 |= (1 << 8); /* PTW */
3148 }
4a501606
PM
3149 }
3150 } else {
b7cc4e82 3151 /* fsr is a DFSR/IFSR value for the short descriptor
702a9357
PM
3152 * translation table format (with WnR always clear).
3153 * Convert it to a 32-bit PAR.
3154 */
b7cc4e82 3155 if (!ret) {
702a9357
PM
3156 /* We do not set any attribute bits in the PAR */
3157 if (page_size == (1 << 24)
3158 && arm_feature(env, ARM_FEATURE_V7)) {
01c097f7 3159 par64 = (phys_addr & 0xff000000) | (1 << 1);
702a9357 3160 } else {
01c097f7 3161 par64 = phys_addr & 0xfffff000;
702a9357 3162 }
8bf5b6a9
PM
3163 if (!attrs.secure) {
3164 par64 |= (1 << 9); /* NS */
3165 }
702a9357 3166 } else {
5efe9ed4
PM
3167 uint32_t fsr = arm_fi_to_sfsc(&fi);
3168
b7cc4e82
PC
3169 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3170 ((fsr & 0xf) << 1) | 1;
702a9357 3171 }
4a501606 3172 }
060e8a48
PM
3173 return par64;
3174}
3175
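/*
 * Illustrative layout of the successful 64-bit PAR built above (a
 * sketch, not authoritative): F is bit 0 (zero on success), the LPAE
 * bit is bit 11, NS bit 9, SH bits [8:7], ATTR bits [63:56], and the
 * page-aligned physical address fills the bits above 12.  For example,
 * a non-secure page at 0x40001000 with Normal write-back attributes
 * (0xff) and inner-shareable SH == 3 comes out as
 *   par64 == 0xff00000040001b80
 */
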
3176static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3177{
03ae85f8 3178 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
060e8a48 3179 uint64_t par64;
d3649702
PM
3180 ARMMMUIdx mmu_idx;
3181 int el = arm_current_el(env);
3182 bool secure = arm_is_secure_below_el3(env);
060e8a48 3183
d3649702
PM
3184 switch (ri->opc2 & 6) {
3185 case 0:
3186 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
3187 switch (el) {
3188 case 3:
3189 mmu_idx = ARMMMUIdx_S1E3;
3190 break;
3191 case 2:
3192 mmu_idx = ARMMMUIdx_S1NSE1;
3193 break;
3194 case 1:
3195 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3196 break;
3197 default:
3198 g_assert_not_reached();
3199 }
3200 break;
3201 case 2:
3202 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3203 switch (el) {
3204 case 3:
3205 mmu_idx = ARMMMUIdx_S1SE0;
3206 break;
3207 case 2:
3208 mmu_idx = ARMMMUIdx_S1NSE0;
3209 break;
3210 case 1:
3211 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3212 break;
3213 default:
3214 g_assert_not_reached();
3215 }
3216 break;
3217 case 4:
3218 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3219 mmu_idx = ARMMMUIdx_S12NSE1;
3220 break;
3221 case 6:
3222 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3223 mmu_idx = ARMMMUIdx_S12NSE0;
3224 break;
3225 default:
3226 g_assert_not_reached();
3227 }
3228
3229 par64 = do_ats_write(env, value, access_type, mmu_idx);
01c097f7
FA
3230
3231 A32_BANKED_CURRENT_REG_SET(env, par, par64);
4a501606 3232}
060e8a48 3233
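/*
 * Guest-side usage sketch (illustrative): a 32-bit guest at PL1 runs
 *
 *     mcr p15, 0, r0, c7, c8, 0   @ ATS1CPR: translate the VA in r0
 *     mrc p15, 0, r1, c7, c4, 0   @ read PAR to fetch the result
 *
 * The first instruction lands in ats_write() above with ri->opc2 == 0,
 * and the result is published via
 * A32_BANKED_CURRENT_REG_SET(env, par, par64).
 */
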
14db7fe0
PM
3234static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3235 uint64_t value)
3236{
03ae85f8 3237 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
14db7fe0
PM
3238 uint64_t par64;
3239
23463e0e 3240 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
14db7fe0
PM
3241
3242 A32_BANKED_CURRENT_REG_SET(env, par, par64);
3243}
3244
3f208fd7
PM
3245static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3246 bool isread)
2a47df95
PM
3247{
3248 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3249 return CP_ACCESS_TRAP;
3250 }
3251 return CP_ACCESS_OK;
3252}
3253
060e8a48
PM
3254static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3255 uint64_t value)
3256{
03ae85f8 3257 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
d3649702
PM
3258 ARMMMUIdx mmu_idx;
3259 int secure = arm_is_secure_below_el3(env);
3260
3261 switch (ri->opc2 & 6) {
3262 case 0:
3263 switch (ri->opc1) {
3264 case 0: /* AT S1E1R, AT S1E1W */
3265 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3266 break;
3267 case 4: /* AT S1E2R, AT S1E2W */
3268 mmu_idx = ARMMMUIdx_S1E2;
3269 break;
3270 case 6: /* AT S1E3R, AT S1E3W */
3271 mmu_idx = ARMMMUIdx_S1E3;
3272 break;
3273 default:
3274 g_assert_not_reached();
3275 }
3276 break;
3277 case 2: /* AT S1E0R, AT S1E0W */
3278 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3279 break;
3280 case 4: /* AT S12E1R, AT S12E1W */
2a47df95 3281 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
d3649702
PM
3282 break;
3283 case 6: /* AT S12E0R, AT S12E0W */
2a47df95 3284 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
d3649702
PM
3285 break;
3286 default:
3287 g_assert_not_reached();
3288 }
060e8a48 3289
d3649702 3290 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
060e8a48 3291}
4a501606
PM
3292#endif
3293
3294static const ARMCPRegInfo vapa_cp_reginfo[] = {
3295 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3296 .access = PL1_RW, .resetvalue = 0,
01c097f7
FA
3297 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3298 offsetoflow32(CPUARMState, cp15.par_ns) },
4a501606
PM
3299 .writefn = par_write },
3300#ifndef CONFIG_USER_ONLY
87562e4f 3301 /* This underdecoding is safe because the reginfo is NO_RAW. */
4a501606 3302 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
92611c00 3303 .access = PL1_W, .accessfn = ats_access,
0710b2fa 3304 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
4a501606
PM
3305#endif
3306 REGINFO_SENTINEL
3307};
3308
18032bec
PM
3309/* Return basic MPU access permission bits. */
3310static uint32_t simple_mpu_ap_bits(uint32_t val)
3311{
3312 uint32_t ret;
3313 uint32_t mask;
3314 int i;
3315 ret = 0;
3316 mask = 3;
3317 for (i = 0; i < 16; i += 2) {
3318 ret |= (val >> i) & mask;
3319 mask <<= 2;
3320 }
3321 return ret;
3322}
3323
3324/* Pad basic MPU access permission bits to extended format. */
3325static uint32_t extended_mpu_ap_bits(uint32_t val)
3326{
3327 uint32_t ret;
3328 uint32_t mask;
3329 int i;
3330 ret = 0;
3331 mask = 3;
3332 for (i = 0; i < 16; i += 2) {
3333 ret |= (val & mask) << i;
3334 mask <<= 2;
3335 }
3336 return ret;
3337}
3338
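/*
 * Worked example (sketch): the two helpers above are inverses over the
 * eight two-bit AP fields, moving region n's AP bits between bits
 * [4n+1:4n] of the extended form and bits [2n+1:2n] of the simple
 * form, e.g.
 *   simple_mpu_ap_bits(0x00000033)   == 0x0000000f
 *   extended_mpu_ap_bits(0x0000000f) == 0x00000033
 */
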
c4241c7d
PM
3339static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3340 uint64_t value)
18032bec 3341{
7e09797c 3342 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
18032bec
PM
3343}
3344
c4241c7d 3345static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18032bec 3346{
7e09797c 3347 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
18032bec
PM
3348}
3349
c4241c7d
PM
3350static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3351 uint64_t value)
18032bec 3352{
7e09797c 3353 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
18032bec
PM
3354}
3355
c4241c7d 3356static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18032bec 3357{
7e09797c 3358 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
18032bec
PM
3359}
3360
6cb0b013
PC
3361static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3362{
3363 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3364
3365 if (!u32p) {
3366 return 0;
3367 }
3368
1bc04a88 3369 u32p += env->pmsav7.rnr[M_REG_NS];
6cb0b013
PC
3370 return *u32p;
3371}
3372
3373static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3374 uint64_t value)
3375{
2fc0cc0e 3376 ARMCPU *cpu = env_archcpu(env);
6cb0b013
PC
3377 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3378
3379 if (!u32p) {
3380 return;
3381 }
3382
1bc04a88 3383 u32p += env->pmsav7.rnr[M_REG_NS];
d10eb08f 3384 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
6cb0b013
PC
3385 *u32p = value;
3386}
3387
6cb0b013
PC
3388static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3389 uint64_t value)
3390{
2fc0cc0e 3391 ARMCPU *cpu = env_archcpu(env);
6cb0b013
PC
3392 uint32_t nrgs = cpu->pmsav7_dregion;
3393
3394 if (value >= nrgs) {
3395 qemu_log_mask(LOG_GUEST_ERROR,
3396 "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3397 " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3398 return;
3399 }
3400
3401 raw_write(env, ri, value);
3402}
3403
3404static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
69ceea64
PM
3405 /* Reset for all these registers is handled in arm_cpu_reset(),
3406 * because the PMSAv7 is also used by M-profile CPUs, which do
3407 * not register cpregs but still need the state to be reset.
3408 */
6cb0b013
PC
3409 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3410 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3411 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
69ceea64
PM
3412 .readfn = pmsav7_read, .writefn = pmsav7_write,
3413 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3414 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3415 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3416 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
69ceea64
PM
3417 .readfn = pmsav7_read, .writefn = pmsav7_write,
3418 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3419 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3420 .access = PL1_RW, .type = ARM_CP_NO_RAW,
3421 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
69ceea64
PM
3422 .readfn = pmsav7_read, .writefn = pmsav7_write,
3423 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3424 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3425 .access = PL1_RW,
1bc04a88 3426 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
69ceea64
PM
3427 .writefn = pmsav7_rgnr_write,
3428 .resetfn = arm_cp_reset_ignore },
6cb0b013
PC
3429 REGINFO_SENTINEL
3430};
3431
18032bec
PM
3432static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3433 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
7a0e58fa 3434 .access = PL1_RW, .type = ARM_CP_ALIAS,
7e09797c 3435 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
18032bec
PM
3436 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3437 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
7a0e58fa 3438 .access = PL1_RW, .type = ARM_CP_ALIAS,
7e09797c 3439 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
18032bec
PM
3440 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3441 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3442 .access = PL1_RW,
7e09797c
PM
3443 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3444 .resetvalue = 0, },
18032bec
PM
3445 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3446 .access = PL1_RW,
7e09797c
PM
3447 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3448 .resetvalue = 0, },
ecce5c3c
PM
3449 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3450 .access = PL1_RW,
3451 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3452 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3453 .access = PL1_RW,
3454 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
06d76f31 3455 /* Protection region base and size registers */
e508a92b
PM
3456 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3457 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3458 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3459 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3460 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3461 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3462 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3463 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3464 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3465 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3466 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3467 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3468 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3469 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3470 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3471 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3472 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3473 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3474 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3475 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3476 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3477 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3478 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3479 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
18032bec
PM
3480 REGINFO_SENTINEL
3481};
3482
c4241c7d
PM
3483static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3484 uint64_t value)
ecce5c3c 3485{
11f136ee 3486 TCR *tcr = raw_ptr(env, ri);
2ebcebe2
PM
3487 int maskshift = extract32(value, 0, 3);
3488
e389be16
FA
3489 if (!arm_feature(env, ARM_FEATURE_V8)) {
3490 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3491 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3492 * using Long-descriptor translation table format */
3493 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3494 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3495 /* In an implementation that includes the Security Extensions
3496 * TTBCR has additional fields PD0 [4] and PD1 [5] for
3497 * Short-descriptor translation table format.
3498 */
3499 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3500 } else {
3501 value &= TTBCR_N;
3502 }
e42c4db3 3503 }
e389be16 3504
b6af0975 3505 /* Update the masks corresponding to the TCR bank being written.
11f136ee 3506 * Note that we always calculate mask and base_mask, but
e42c4db3 3507 * they are only used for short-descriptor tables (i.e. if EAE is 0);
11f136ee
FA
3508 * for long-descriptor tables the TCR fields are used differently
3509 * and the mask and base_mask values are meaningless.
e42c4db3 3510 */
11f136ee
FA
3511 tcr->raw_tcr = value;
3512 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3513 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
ecce5c3c
PM
3514}
3515
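/*
 * Worked example (sketch): with TTBCR.N == 2 the code above computes
 *   tcr->mask      = ~(0xffffffffu >> 2) = 0xc0000000
 *   tcr->base_mask = ~(0x3fffu >> 2)     = 0xfffff000
 * i.e. any VA with either of its top two bits set is looked up via
 * TTBR1, and the TTBR0 table base is aligned to 16KB >> N = 4KB.
 */
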
c4241c7d
PM
3516static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3517 uint64_t value)
d4e6df63 3518{
2fc0cc0e 3519 ARMCPU *cpu = env_archcpu(env);
ab638a32 3520 TCR *tcr = raw_ptr(env, ri);
00c8cb0a 3521
d4e6df63
PM
3522 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3523 /* With LPAE the TTBCR could result in a change of ASID
3524 * via the TTBCR.A1 bit, so do a TLB flush.
3525 */
d10eb08f 3526 tlb_flush(CPU(cpu));
d4e6df63 3527 }
ab638a32
RH
3528 /* Preserve the high half of TCR_EL1, set via TTBCR2. */
3529 value = deposit64(tcr->raw_tcr, 0, 32, value);
c4241c7d 3530 vmsa_ttbcr_raw_write(env, ri, value);
d4e6df63
PM
3531}
3532
ecce5c3c
PM
3533static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3534{
11f136ee
FA
3535 TCR *tcr = raw_ptr(env, ri);
3536
3537 /* Reset both the TCR as well as the masks corresponding to the bank of
3538 * the TCR being reset.
3539 */
3540 tcr->raw_tcr = 0;
3541 tcr->mask = 0;
3542 tcr->base_mask = 0xffffc000u;
ecce5c3c
PM
3543}
3544
cb2e37df
PM
3545static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3546 uint64_t value)
3547{
2fc0cc0e 3548 ARMCPU *cpu = env_archcpu(env);
11f136ee 3549 TCR *tcr = raw_ptr(env, ri);
00c8cb0a 3550
cb2e37df 3551 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
d10eb08f 3552 tlb_flush(CPU(cpu));
11f136ee 3553 tcr->raw_tcr = value;
cb2e37df
PM
3554}
3555
327ed10f
PM
3556static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3557 uint64_t value)
3558{
93f379b0
RH
3559 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
3560 if (cpreg_field_is_64bit(ri) &&
3561 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2fc0cc0e 3562 ARMCPU *cpu = env_archcpu(env);
d10eb08f 3563 tlb_flush(CPU(cpu));
327ed10f
PM
3564 }
3565 raw_write(env, ri, value);
3566}
3567
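/*
 * Sketch of the check above: for the 64-bit forms of TTBR the ASID
 * lives in the upper bits (TTBRn_EL1[63:48] for AArch64, bits [55:48]
 * for the AArch32 LPAE form), so
 *   extract64(old ^ new, 48, 16) != 0
 * detects an ASID change; plain 32-bit TTBR writes cannot change the
 * ASID and never need the flush.
 */
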
ed30da8e
RH
3568static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3569 uint64_t value)
3570{
3571 /* TODO: There are ASID fields in here with HCR_EL2.E2H */
3572 raw_write(env, ri, value);
3573}
3574
b698e9cf
EI
3575static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3576 uint64_t value)
3577{
2fc0cc0e 3578 ARMCPU *cpu = env_archcpu(env);
b698e9cf
EI
3579 CPUState *cs = CPU(cpu);
3580
3581 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
3582 if (raw_read(env, ri) != value) {
0336cbf8 3583 tlb_flush_by_mmuidx(cs,
8bd5c820
PM
3584 ARMMMUIdxBit_S12NSE1 |
3585 ARMMMUIdxBit_S12NSE0 |
3586 ARMMMUIdxBit_S2NS);
b698e9cf
EI
3587 raw_write(env, ri, value);
3588 }
3589}
3590
8e5d75c9 3591static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
18032bec 3592 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
7a0e58fa 3593 .access = PL1_RW, .type = ARM_CP_ALIAS,
4a7e2d73 3594 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
b061a82b 3595 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
18032bec 3596 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
88ca1c2d
FA
3597 .access = PL1_RW, .resetvalue = 0,
3598 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3599 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
8e5d75c9
PC
3600 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3601 .access = PL1_RW, .resetvalue = 0,
3602 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3603 offsetof(CPUARMState, cp15.dfar_ns) } },
3604 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3605 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3606 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3607 .resetvalue = 0, },
3608 REGINFO_SENTINEL
3609};
3610
3611static const ARMCPRegInfo vmsa_cp_reginfo[] = {
6cd8a264
RH
3612 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3613 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3614 .access = PL1_RW,
d81c519c 3615 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
327ed10f 3616 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
7dd8c9af
FA
3617 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3618 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3619 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3620 offsetof(CPUARMState, cp15.ttbr0_ns) } },
327ed10f 3621 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
7dd8c9af
FA
3622 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3623 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3624 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3625 offsetof(CPUARMState, cp15.ttbr1_ns) } },
cb2e37df
PM
3626 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3627 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3628 .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
3629 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
11f136ee 3630 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
cb2e37df 3631 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
7a0e58fa 3632 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
b061a82b 3633 .raw_writefn = vmsa_ttbcr_raw_write,
11f136ee
FA
3634 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
3635 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
18032bec
PM
3636 REGINFO_SENTINEL
3637};
3638
ab638a32
RH
3639/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3640 * qemu tlbs nor adjusting cached masks.
3641 */
3642static const ARMCPRegInfo ttbcr2_reginfo = {
3643 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3644 .access = PL1_RW, .type = ARM_CP_ALIAS,
3645 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
3646 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
3647};
3648
c4241c7d
PM
3649static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3650 uint64_t value)
1047b9d7
PM
3651{
3652 env->cp15.c15_ticonfig = value & 0xe7;
3653 /* The OS_TYPE bit in this register changes the reported CPUID! */
3654 env->cp15.c0_cpuid = (value & (1 << 5)) ?
3655 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1047b9d7
PM
3656}
3657
c4241c7d
PM
3658static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3659 uint64_t value)
1047b9d7
PM
3660{
3661 env->cp15.c15_threadid = value & 0xffff;
1047b9d7
PM
3662}
3663
c4241c7d
PM
3664static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3665 uint64_t value)
1047b9d7
PM
3666{
3667 /* Wait-for-interrupt (deprecated) */
2fc0cc0e 3668 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
1047b9d7
PM
3669}
3670
c4241c7d
PM
3671static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3672 uint64_t value)
c4804214
PM
3673{
3674 /* On OMAP there are registers indicating the max/min index of dcache lines
3675 * containing a dirty line; cache flush operations have to reset these.
3676 */
3677 env->cp15.c15_i_max = 0x000;
3678 env->cp15.c15_i_min = 0xff0;
c4804214
PM
3679}
3680
18032bec
PM
3681static const ARMCPRegInfo omap_cp_reginfo[] = {
3682 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3683 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
d81c519c 3684 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
6cd8a264 3685 .resetvalue = 0, },
1047b9d7
PM
3686 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3687 .access = PL1_RW, .type = ARM_CP_NOP },
3688 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3689 .access = PL1_RW,
3690 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3691 .writefn = omap_ticonfig_write },
3692 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3693 .access = PL1_RW,
3694 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3695 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3696 .access = PL1_RW, .resetvalue = 0xff0,
3697 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3698 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3699 .access = PL1_RW,
3700 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3701 .writefn = omap_threadid_write },
3702 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3703 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
7a0e58fa 3704 .type = ARM_CP_NO_RAW,
1047b9d7
PM
3705 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3706 /* TODO: Peripheral port remap register:
3707 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3708 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
3709 * when MMU is off.
3710 */
c4804214 3711 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
d4e6df63 3712 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
7a0e58fa 3713 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
c4804214 3714 .writefn = omap_cachemaint_write },
34f90529
PM
3715 { .name = "C9", .cp = 15, .crn = 9,
3716 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3717 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
1047b9d7
PM
3718 REGINFO_SENTINEL
3719};
3720
c4241c7d
PM
3721static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3722 uint64_t value)
1047b9d7 3723{
c0f4af17 3724 env->cp15.c15_cpar = value & 0x3fff;
1047b9d7
PM
3725}
3726
3727static const ARMCPRegInfo xscale_cp_reginfo[] = {
3728 { .name = "XSCALE_CPAR",
3729 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3730 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3731 .writefn = xscale_cpar_write, },
2771db27
PM
3732 { .name = "XSCALE_AUXCR",
3733 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3734 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3735 .resetvalue = 0, },
3b771579
PM
3736 /* XScale specific cache-lockdown: since we have no cache we NOP these
3737 * and hope the guest does not really rely on cache behaviour.
3738 */
3739 { .name = "XSCALE_LOCK_ICACHE_LINE",
3740 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3741 .access = PL1_W, .type = ARM_CP_NOP },
3742 { .name = "XSCALE_UNLOCK_ICACHE",
3743 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3744 .access = PL1_W, .type = ARM_CP_NOP },
3745 { .name = "XSCALE_DCACHE_LOCK",
3746 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3747 .access = PL1_RW, .type = ARM_CP_NOP },
3748 { .name = "XSCALE_UNLOCK_DCACHE",
3749 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3750 .access = PL1_W, .type = ARM_CP_NOP },
1047b9d7
PM
3751 REGINFO_SENTINEL
3752};
3753
3754static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3755 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
3756 * implementation of this implementation-defined space.
3757 * Ideally this should eventually disappear in favour of actually
3758 * implementing the correct behaviour for all cores.
3759 */
3760 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3761 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3671cd87 3762 .access = PL1_RW,
7a0e58fa 3763 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
d4e6df63 3764 .resetvalue = 0 },
18032bec
PM
3765 REGINFO_SENTINEL
3766};
3767
c4804214
PM
3768static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3769 /* Cache status: RAZ because we have no cache so it's always clean */
3770 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
7a0e58fa 3771 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3772 .resetvalue = 0 },
c4804214
PM
3773 REGINFO_SENTINEL
3774};
3775
3776static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3777 /* We never have a block transfer operation in progress */
3778 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
7a0e58fa 3779 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3780 .resetvalue = 0 },
30b05bba
PM
3781 /* The cache ops themselves: these all NOP for QEMU */
3782 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3783 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3784 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3785 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3786 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3787 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3788 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3789 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3790 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3791 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3792 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3793 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
c4804214
PM
3794 REGINFO_SENTINEL
3795};
3796
3797static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3798 /* The cache test-and-clean instructions always return (1 << 30)
3799 * to indicate that there are no dirty cache lines.
3800 */
3801 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
7a0e58fa 3802 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3803 .resetvalue = (1 << 30) },
c4804214 3804 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
7a0e58fa 3805 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
d4e6df63 3806 .resetvalue = (1 << 30) },
c4804214
PM
3807 REGINFO_SENTINEL
3808};
3809
34f90529
PM
3810static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3811 /* Ignore ReadBuffer accesses */
3812 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3813 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
d4e6df63 3814 .access = PL1_RW, .resetvalue = 0,
7a0e58fa 3815 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
34f90529
PM
3816 REGINFO_SENTINEL
3817};
3818
731de9e6
EI
3819static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3820{
2fc0cc0e 3821 ARMCPU *cpu = env_archcpu(env);
731de9e6
EI
3822 unsigned int cur_el = arm_current_el(env);
3823 bool secure = arm_is_secure(env);
3824
3825 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3826 return env->cp15.vpidr_el2;
3827 }
3828 return raw_read(env, ri);
3829}
3830
06a7e647 3831static uint64_t mpidr_read_val(CPUARMState *env)
81bdde9d 3832{
2fc0cc0e 3833 ARMCPU *cpu = env_archcpu(env);
eb5e1d3c
PF
3834 uint64_t mpidr = cpu->mp_affinity;
3835
81bdde9d 3836 if (arm_feature(env, ARM_FEATURE_V7MP)) {
78dbbbe4 3837 mpidr |= (1U << 31);
81bdde9d
PM
3838 /* Cores which are uniprocessor (non-coherent)
3839 * but still implement the MP extensions set
a8e81b31 3840 * bit 30. (For instance, Cortex-R5).
81bdde9d 3841 */
a8e81b31
PC
3842 if (cpu->mp_is_up) {
3843 mpidr |= (1u << 30);
3844 }
81bdde9d 3845 }
c4241c7d 3846 return mpidr;
81bdde9d
PM
3847}
3848
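/*
 * Example (sketch): a uniprocessor core with the MP extensions, such
 * as the Cortex-R5 cited in the comment above, with mp_affinity == 0
 * and mp_is_up set, reads MPIDR == 0xc0000000: bit 31 from
 * ARM_FEATURE_V7MP plus the U bit, bit 30.
 */
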
06a7e647
EI
3849static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3850{
f0d574d6
EI
3851 unsigned int cur_el = arm_current_el(env);
3852 bool secure = arm_is_secure(env);
3853
3854 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3855 return env->cp15.vmpidr_el2;
3856 }
06a7e647
EI
3857 return mpidr_read_val(env);
3858}
3859
7ac681cf 3860static const ARMCPRegInfo lpae_cp_reginfo[] = {
a903c449 3861 /* NOP AMAIR0/1 */
b0fe2427
PM
3862 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
3863 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
a903c449 3864 .access = PL1_RW, .type = ARM_CP_CONST,
7ac681cf 3865 .resetvalue = 0 },
b0fe2427 3866 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
7ac681cf 3867 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
a903c449 3868 .access = PL1_RW, .type = ARM_CP_CONST,
7ac681cf 3869 .resetvalue = 0 },
891a2fe7 3870 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
01c097f7
FA
3871 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3872 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3873 offsetof(CPUARMState, cp15.par_ns)} },
891a2fe7 3874 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
7a0e58fa 3875 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
7dd8c9af
FA
3876 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3877 offsetof(CPUARMState, cp15.ttbr0_ns) },
b061a82b 3878 .writefn = vmsa_ttbr_write, },
891a2fe7 3879 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
7a0e58fa 3880 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
7dd8c9af
FA
3881 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3882 offsetof(CPUARMState, cp15.ttbr1_ns) },
b061a82b 3883 .writefn = vmsa_ttbr_write, },
7ac681cf
PM
3884 REGINFO_SENTINEL
3885};
3886
c4241c7d 3887static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
b0d2b7d0 3888{
c4241c7d 3889 return vfp_get_fpcr(env);
b0d2b7d0
PM
3890}
3891
c4241c7d
PM
3892static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3893 uint64_t value)
b0d2b7d0
PM
3894{
3895 vfp_set_fpcr(env, value);
b0d2b7d0
PM
3896}
3897
c4241c7d 3898static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
b0d2b7d0 3899{
c4241c7d 3900 return vfp_get_fpsr(env);
b0d2b7d0
PM
3901}
3902
c4241c7d
PM
3903static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3904 uint64_t value)
b0d2b7d0
PM
3905{
3906 vfp_set_fpsr(env, value);
b0d2b7d0
PM
3907}
3908
3f208fd7
PM
3909static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3910 bool isread)
c2b820fe 3911{
137feaa9 3912 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
c2b820fe
PM
3913 return CP_ACCESS_TRAP;
3914 }
3915 return CP_ACCESS_OK;
3916}
3917
3918static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3919 uint64_t value)
3920{
3921 env->daif = value & PSTATE_DAIF;
3922}
3923
8af35c37 3924static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3f208fd7
PM
3925 const ARMCPRegInfo *ri,
3926 bool isread)
8af35c37
PM
3927{
3928 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3929 * SCTLR_EL1.UCI is set.
3930 */
137feaa9 3931 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
8af35c37
PM
3932 return CP_ACCESS_TRAP;
3933 }
3934 return CP_ACCESS_OK;
3935}
3936
dbb1fb27
AB
3937/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3938 * Page D4-1736 (DDI0487A.b)
3939 */
3940
b7e0730d
RH
3941static int vae1_tlbmask(CPUARMState *env)
3942{
3943 if (arm_is_secure_below_el3(env)) {
3944 return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
3945 } else {
3946 return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
3947 }
3948}
3949
fd3ed969
PM
3950static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3951 uint64_t value)
168aa23b 3952{
29a0af61 3953 CPUState *cs = env_cpu(env);
b7e0730d 3954 int mask = vae1_tlbmask(env);
dbb1fb27 3955
b7e0730d 3956 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
168aa23b
PM
3957}
3958
b4ab8ce9
PM
3959static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3960 uint64_t value)
3961{
29a0af61 3962 CPUState *cs = env_cpu(env);
b7e0730d 3963 int mask = vae1_tlbmask(env);
b4ab8ce9
PM
3964
3965 if (tlb_force_broadcast(env)) {
527db2be
RH
3966 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
3967 } else {
3968 tlb_flush_by_mmuidx(cs, mask);
b4ab8ce9 3969 }
b4ab8ce9
PM
3970}
3971
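/*
 * Sketch of the force-broadcast pattern used by the non-IS op above
 * (and by tlbi_aa64_vae1_write below): architecturally, HCR_EL2.FB
 * forces TLB maintenance issued at NS EL1 to be broadcast to the
 * Inner Shareable domain.  When tlb_force_broadcast() reports that
 * situation, the local flush is upgraded to the _all_cpus_synced
 * variant; otherwise only this vCPU's TLB is flushed.
 */
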
90c19cdf 3972static int alle1_tlbmask(CPUARMState *env)
168aa23b 3973{
90c19cdf
RH
3974 /*
3975 * Note that the 'ALL' scope must invalidate both stage 1 and
fd3ed969
PM
3976 * stage 2 translations, whereas most other scopes only invalidate
3977 * stage 1 translations.
3978 */
fd3ed969 3979 if (arm_is_secure_below_el3(env)) {
90c19cdf
RH
3980 return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
3981 } else if (arm_feature(env, ARM_FEATURE_EL2)) {
3982 return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0 | ARMMMUIdxBit_S2NS;
fd3ed969 3983 } else {
90c19cdf 3984 return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
fd3ed969 3985 }
168aa23b
PM
3986}
3987
90c19cdf
RH
3988static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3989 uint64_t value)
3990{
3991 CPUState *cs = env_cpu(env);
3992 int mask = alle1_tlbmask(env);
3993
3994 tlb_flush_by_mmuidx(cs, mask);
3995}
3996
fd3ed969 3997static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
fa439fc5
PM
3998 uint64_t value)
3999{
2fc0cc0e 4000 ARMCPU *cpu = env_archcpu(env);
fd3ed969
PM
4001 CPUState *cs = CPU(cpu);
4002
8bd5c820 4003 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
fd3ed969
PM
4004}
4005
43efaa33
PM
4006static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4007 uint64_t value)
4008{
2fc0cc0e 4009 ARMCPU *cpu = env_archcpu(env);
43efaa33
PM
4010 CPUState *cs = CPU(cpu);
4011
8bd5c820 4012 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
43efaa33
PM
4013}
4014
fd3ed969
PM
4015static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4016 uint64_t value)
4017{
29a0af61 4018 CPUState *cs = env_cpu(env);
90c19cdf
RH
4019 int mask = alle1_tlbmask(env);
4020
4021 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
fa439fc5
PM
4022}
4023
2bfb9d75
PM
4024static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4025 uint64_t value)
4026{
29a0af61 4027 CPUState *cs = env_cpu(env);
2bfb9d75 4028
8bd5c820 4029 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
2bfb9d75
PM
4030}
4031
43efaa33
PM
4032static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4033 uint64_t value)
4034{
29a0af61 4035 CPUState *cs = env_cpu(env);
43efaa33 4036
8bd5c820 4037 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
43efaa33
PM
4038}
4039
fd3ed969
PM
4040static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4041 uint64_t value)
fa439fc5 4042{
fd3ed969
PM
4043 /* Invalidate by VA, EL2
4044 * Currently handles both VAE2 and VALE2, since we don't support
4045 * flush-last-level-only.
4046 */
2fc0cc0e 4047 ARMCPU *cpu = env_archcpu(env);
fd3ed969
PM
4048 CPUState *cs = CPU(cpu);
4049 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4050
8bd5c820 4051 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
fd3ed969
PM
4052}
4053
43efaa33
PM
4054static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4055 uint64_t value)
4056{
4057 /* Invalidate by VA, EL3
4058 * Currently handles both VAE3 and VALE3, since we don't support
4059 * flush-last-level-only.
4060 */
2fc0cc0e 4061 ARMCPU *cpu = env_archcpu(env);
43efaa33
PM
4062 CPUState *cs = CPU(cpu);
4063 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4064
8bd5c820 4065 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
43efaa33
PM
4066}
4067
fd3ed969
PM
4068static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4069 uint64_t value)
4070{
90c19cdf
RH
4071 CPUState *cs = env_cpu(env);
4072 int mask = vae1_tlbmask(env);
fa439fc5
PM
4073 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4074
90c19cdf 4075 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
fa439fc5
PM
4076}
4077
b4ab8ce9
PM
4078static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4079 uint64_t value)
4080{
4081 /* Invalidate by VA, EL1&0 (AArch64 version).
4082 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4083 * since we don't support flush-for-specific-ASID-only or
4084 * flush-last-level-only.
4085 */
90c19cdf
RH
4086 CPUState *cs = env_cpu(env);
4087 int mask = vae1_tlbmask(env);
b4ab8ce9
PM
4088 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4089
4090 if (tlb_force_broadcast(env)) {
527db2be
RH
4091 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
4092 } else {
4093 tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
b4ab8ce9 4094 }
b4ab8ce9
PM
4095}
4096
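/*
 * Worked example for the pageaddr computation above (sketch): the Xt
 * argument of TLBI VAE1 carries VA[55:12] in bits [43:0].  For the
 * kernel VA 0xffff800000001000, Xt[43:0] == 0xff800000001, and
 *   sextract64(0xff800000001ULL << 12, 0, 56) == 0xffff800000001000
 * i.e. the shift restores the page-offset position and the sign
 * extension from bit 55 rebuilds the canonical upper bits.
 */
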
fd3ed969
PM
4097static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4098 uint64_t value)
fa439fc5 4099{
29a0af61 4100 CPUState *cs = env_cpu(env);
fd3ed969 4101 uint64_t pageaddr = sextract64(value << 12, 0, 56);
fa439fc5 4102
a67cf277 4103 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
8bd5c820 4104 ARMMMUIdxBit_S1E2);
fa439fc5
PM
4105}
4106
43efaa33
PM
4107static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4108 uint64_t value)
4109{
29a0af61 4110 CPUState *cs = env_cpu(env);
43efaa33
PM
4111 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4112
a67cf277 4113 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
8bd5c820 4114 ARMMMUIdxBit_S1E3);
43efaa33
PM
4115}
4116
cea66e91
PM
4117static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4118 uint64_t value)
4119{
4120 /* Invalidate by IPA. This has to invalidate any structures that
4121 * contain only stage 2 translation information, but does not need
4122 * to apply to structures that contain combined stage 1 and stage 2
4123 * translation information.
4124 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
4125 */
2fc0cc0e 4126 ARMCPU *cpu = env_archcpu(env);
cea66e91
PM
4127 CPUState *cs = CPU(cpu);
4128 uint64_t pageaddr;
4129
4130 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4131 return;
4132 }
4133
4134 pageaddr = sextract64(value << 12, 0, 48);
4135
8bd5c820 4136 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
cea66e91
PM
4137}
4138
4139static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4140 uint64_t value)
4141{
29a0af61 4142 CPUState *cs = env_cpu(env);
cea66e91
PM
4143 uint64_t pageaddr;
4144
4145 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4146 return;
4147 }
4148
4149 pageaddr = sextract64(value << 12, 0, 48);
4150
a67cf277 4151 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
8bd5c820 4152 ARMMMUIdxBit_S2NS);
cea66e91
PM
4153}
4154
3f208fd7
PM
4155static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4156 bool isread)
aca3f40b
PM
4157{
4158 /* We don't implement EL2, so the only control on DC ZVA is the
4159 * bit in the SCTLR which can prohibit access for EL0.
4160 */
137feaa9 4161 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
aca3f40b
PM
4162 return CP_ACCESS_TRAP;
4163 }
4164 return CP_ACCESS_OK;
4165}
4166
4167static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4168{
2fc0cc0e 4169 ARMCPU *cpu = env_archcpu(env);
aca3f40b
PM
4170 int dzp_bit = 1 << 4;
4171
4172 /* DZP indicates whether DC ZVA access is allowed */
3f208fd7 4173 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
aca3f40b
PM
4174 dzp_bit = 0;
4175 }
4176 return cpu->dcz_blocksize | dzp_bit;
4177}
4178
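/*
 * Sketch of the DCZID_EL0 value returned above: BS in bits [3:0] is
 * log2 of the DC ZVA block size in 32-bit words, and DZP is bit 4.
 * With dcz_blocksize == 4 (a 64-byte block), an EL0 reader that is
 * denied DC ZVA by SCTLR_EL1.DZE sees 0x14 (DZP | BS), while a
 * permitted reader sees 0x4.
 */
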
3f208fd7
PM
4179static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4180 bool isread)
f502cfc2 4181{
cdcf1405 4182 if (!(env->pstate & PSTATE_SP)) {
f502cfc2
PM
4183 /* Access to SP_EL0 is undefined if it's being used as
4184 * the stack pointer.
4185 */
4186 return CP_ACCESS_TRAP_UNCATEGORIZED;
4187 }
4188 return CP_ACCESS_OK;
4189}
4190
4191static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4192{
4193 return env->pstate & PSTATE_SP;
4194}
4195
4196static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4197{
4198 update_spsel(env, val);
4199}
4200
137feaa9
FA
4201static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4202 uint64_t value)
4203{
2fc0cc0e 4204 ARMCPU *cpu = env_archcpu(env);
137feaa9
FA
4205
4206 if (raw_read(env, ri) == value) {
4207 /* Skip the TLB flush if nothing actually changed; Linux likes
4208 * to do a lot of pointless SCTLR writes.
4209 */
4210 return;
4211 }
4212
06312feb
PM
4213 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4214 /* M bit is RAZ/WI for PMSA with no MPU implemented */
4215 value &= ~SCTLR_M;
4216 }
4217
137feaa9
FA
4218 raw_write(env, ri, value);
4219 /* ??? Lots of these bits are not implemented. */
4220 /* This may enable/disable the MMU, so do a TLB flush. */
d10eb08f 4221 tlb_flush(CPU(cpu));
2e5dcf36
RH
4222
4223 if (ri->type & ARM_CP_SUPPRESS_TB_END) {
4224 /*
4225 * Normally we would always end the TB on an SCTLR write; see the
4226 * comment in ARMCPRegInfo sctlr initialization below for why Xscale
4227 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4228 * of hflags from the translator, so do it here.
4229 */
4230 arm_rebuild_hflags(env);
4231 }
137feaa9
FA
4232}
4233
3f208fd7
PM
4234static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4235 bool isread)
03fbf20f
PM
4236{
4237 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
f2cae609 4238 return CP_ACCESS_TRAP_FP_EL2;
03fbf20f
PM
4239 }
4240 if (env->cp15.cptr_el[3] & CPTR_TFP) {
f2cae609 4241 return CP_ACCESS_TRAP_FP_EL3;
03fbf20f
PM
4242 }
4243 return CP_ACCESS_OK;
4244}
4245
a8d64e73
PM
4246static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4247 uint64_t value)
4248{
4249 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4250}
4251
b0d2b7d0
PM
4252static const ARMCPRegInfo v8_cp_reginfo[] = {
4253 /* Minimal set of EL0-visible registers. This will need to be expanded
4254 * significantly for system emulation of AArch64 CPUs.
4255 */
4256 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4257 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4258 .access = PL0_RW, .type = ARM_CP_NZCV },
c2b820fe
PM
4259 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4260 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
7a0e58fa 4261 .type = ARM_CP_NO_RAW,
c2b820fe
PM
4262 .access = PL0_RW, .accessfn = aa64_daif_access,
4263 .fieldoffset = offsetof(CPUARMState, daif),
4264 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
b0d2b7d0
PM
4265 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4266 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
b916c9c3 4267 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
fe03d45f 4268 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
b0d2b7d0
PM
4269 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4270 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
b916c9c3 4271 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
fe03d45f 4272 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
b0d2b7d0
PM
4273 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4274 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
7a0e58fa 4275 .access = PL0_R, .type = ARM_CP_NO_RAW,
aca3f40b
PM
4276 .readfn = aa64_dczid_read },
4277 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4278 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4279 .access = PL0_W, .type = ARM_CP_DC_ZVA,
4280#ifndef CONFIG_USER_ONLY
4281 /* Avoid overhead of an access check that always passes in user-mode */
4282 .accessfn = aa64_zva_access,
4283#endif
4284 },
0eef9d98
PM
4285 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4286 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4287 .access = PL1_R, .type = ARM_CP_CURRENTEL },
8af35c37
PM
4288 /* Cache ops: all NOPs since we don't emulate caches */
4289 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4290 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4291 .access = PL1_W, .type = ARM_CP_NOP },
4292 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4293 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4294 .access = PL1_W, .type = ARM_CP_NOP },
4295 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4296 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4297 .access = PL0_W, .type = ARM_CP_NOP,
4298 .accessfn = aa64_cacheop_access },
4299 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4300 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4301 .access = PL1_W, .type = ARM_CP_NOP },
4302 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4303 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4304 .access = PL1_W, .type = ARM_CP_NOP },
4305 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4306 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4307 .access = PL0_W, .type = ARM_CP_NOP,
4308 .accessfn = aa64_cacheop_access },
4309 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4310 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4311 .access = PL1_W, .type = ARM_CP_NOP },
4312 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4313 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4314 .access = PL0_W, .type = ARM_CP_NOP,
4315 .accessfn = aa64_cacheop_access },
4316 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4317 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4318 .access = PL0_W, .type = ARM_CP_NOP,
4319 .accessfn = aa64_cacheop_access },
4320 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4321 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4322 .access = PL1_W, .type = ARM_CP_NOP },
168aa23b
PM
4323 /* TLBI operations */
4324 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 4325 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
7a0e58fa 4326 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4327 .writefn = tlbi_aa64_vmalle1is_write },
168aa23b 4328 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 4329 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
7a0e58fa 4330 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4331 .writefn = tlbi_aa64_vae1is_write },
168aa23b 4332 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 4333 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
7a0e58fa 4334 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4335 .writefn = tlbi_aa64_vmalle1is_write },
168aa23b 4336 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 4337 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
7a0e58fa 4338 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4339 .writefn = tlbi_aa64_vae1is_write },
168aa23b 4340 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 4341 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
7a0e58fa 4342 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4343 .writefn = tlbi_aa64_vae1is_write },
168aa23b 4344 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 4345 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
7a0e58fa 4346 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4347 .writefn = tlbi_aa64_vae1is_write },
168aa23b 4348 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
6ab9f499 4349 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
7a0e58fa 4350 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4351 .writefn = tlbi_aa64_vmalle1_write },
168aa23b 4352 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
6ab9f499 4353 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
7a0e58fa 4354 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4355 .writefn = tlbi_aa64_vae1_write },
168aa23b 4356 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
6ab9f499 4357 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
7a0e58fa 4358 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4359 .writefn = tlbi_aa64_vmalle1_write },
168aa23b 4360 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
6ab9f499 4361 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
7a0e58fa 4362 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4363 .writefn = tlbi_aa64_vae1_write },
168aa23b 4364 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
6ab9f499 4365 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
7a0e58fa 4366 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4367 .writefn = tlbi_aa64_vae1_write },
168aa23b 4368 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
6ab9f499 4369 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
7a0e58fa 4370 .access = PL1_W, .type = ARM_CP_NO_RAW,
fd3ed969 4371 .writefn = tlbi_aa64_vae1_write },
cea66e91
PM
4372 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4373 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4374 .access = PL2_W, .type = ARM_CP_NO_RAW,
4375 .writefn = tlbi_aa64_ipas2e1is_write },
4376 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4377 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4378 .access = PL2_W, .type = ARM_CP_NO_RAW,
4379 .writefn = tlbi_aa64_ipas2e1is_write },
83ddf975
PM
4380 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4381 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4382 .access = PL2_W, .type = ARM_CP_NO_RAW,
fd3ed969 4383 .writefn = tlbi_aa64_alle1is_write },
43efaa33
PM
4384 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4385 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4386 .access = PL2_W, .type = ARM_CP_NO_RAW,
4387 .writefn = tlbi_aa64_alle1is_write },
cea66e91
PM
4388 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4389 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4390 .access = PL2_W, .type = ARM_CP_NO_RAW,
4391 .writefn = tlbi_aa64_ipas2e1_write },
4392 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4393 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4394 .access = PL2_W, .type = ARM_CP_NO_RAW,
4395 .writefn = tlbi_aa64_ipas2e1_write },
83ddf975
PM
4396 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4397 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4398 .access = PL2_W, .type = ARM_CP_NO_RAW,
fd3ed969 4399 .writefn = tlbi_aa64_alle1_write },
43efaa33
PM
4400 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4401 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4402 .access = PL2_W, .type = ARM_CP_NO_RAW,
4403 .writefn = tlbi_aa64_alle1is_write },
19525524
PM
4404#ifndef CONFIG_USER_ONLY
4405 /* 64 bit address translation operations */
4406 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4407 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
0710b2fa
PM
4408 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4409 .writefn = ats_write64 },
19525524
PM
4410 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4411 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
0710b2fa
PM
4412 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4413 .writefn = ats_write64 },
19525524
PM
4414 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4415 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
0710b2fa
PM
4416 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4417 .writefn = ats_write64 },
19525524
PM
4418 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4419 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
0710b2fa
PM
4420 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4421 .writefn = ats_write64 },
2a47df95 4422 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
7a379c7e 4423 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
0710b2fa
PM
4424 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4425 .writefn = ats_write64 },
2a47df95 4426 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
7a379c7e 4427 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
0710b2fa
PM
4428 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4429 .writefn = ats_write64 },
2a47df95 4430 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
7a379c7e 4431 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
0710b2fa
PM
4432 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4433 .writefn = ats_write64 },
2a47df95 4434 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
7a379c7e 4435 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
0710b2fa
PM
4436 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4437 .writefn = ats_write64 },
2a47df95
PM
4438 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4439 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4440 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
0710b2fa
PM
4441 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4442 .writefn = ats_write64 },
2a47df95
PM
4443 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4444 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
0710b2fa
PM
4445 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4446 .writefn = ats_write64 },
c96fc9b5
EI
4447 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4448 .type = ARM_CP_ALIAS,
4449 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4450 .access = PL1_RW, .resetvalue = 0,
4451 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4452 .writefn = par_write },
19525524 4453#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    /* Begin with bits defined in base ARMv8.0. */
    uint64_t valid_mask = MAKE_64BIT_MASK(0, 34);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_vh, cpu)) {
        valid_mask |= HCR_E2H;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage 1 and enables stage 2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}

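/*
 * Worked example (editor's illustration, not part of the original source):
 * deposit64(old, pos, len, val) returns 'old' with bits [pos+len-1:pos]
 * replaced by 'val'.  So if hcr_el2 currently holds 0x0000000180000000
 * and the guest writes 0x2 to HCR2, hcr_writehigh computes
 * deposit64(0x0000000180000000, 32, 32, 0x2) == 0x0000000280000000:
 * the low word (HCR) is preserved and only the high word changes.
 */
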
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition is
         * arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

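/*
 * Worked example (editor's illustration, not part of the original source):
 * in non-secure state with HCR_EL2.TGE == 1 and HCR_EL2.E2H == 0, the
 * else branch above makes FMO/IMO/AMO read as 1, and the final mask
 * makes bits such as VI, VF, TSC and TVM read as 0, whatever the guest
 * last wrote to the underlying register.
 */
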
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0x3 << 10);
        value |= env->cp15.cptr_el[2] & (0x3 << 10);
    }
    env->cp15.cptr_el[2] = value;
}

static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3, if NSACR.CP10
     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
     */
    uint64_t value = env->cp15.cptr_el[2];

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value |= 0x3 << 10;
    }
    return value;
}

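/*
 * Worked example (editor's illustration, not part of the original source):
 * with NSACR.CP10 == 0 in the configuration above, a write cannot change
 * HCPTR bits [11:10]: cptr_el2_write masks them out of the new value and
 * re-inserts the stored bits, while cptr_el2_read ORs them in so that
 * TCP10/TCP11 always read as 1.
 */
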
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

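/*
 * Editor's summary of the cases above: EL3 accesses succeed (RW);
 * Secure EL1 accesses trap to EL3; non-secure EL1/EL2 reads succeed;
 * non-secure EL1/EL2 writes UNDEF (uncategorized trap).
 */
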
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

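/*
 * Worked example (editor's illustration, not part of the original source):
 * an AArch32 write of the key value 0xC5ACCE55 sets oslock to 1, and
 * deposit32(oslsr_el1, 1, 1, 1) then sets OSLSR_EL1.OSLK (bit 1); any
 * other AArch32 value clears it.  In AArch64 state only bit 0 of the
 * written value is consulted.
 */
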
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}

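/*
 * Worked example (editor's illustration, not part of the original source):
 * with CPACR_EL1.ZEN == 1 (bit 16 set, bit 17 clear), an access from EL0
 * sets 'disabled' above and traps to EL1 (or to EL2 when HCR_EL2.TGE is
 * set), while the same access from EL1 falls through to the CPACR.FPEN
 * check.
 */
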
static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
    uint32_t end_len;

    end_len = start_len &= 0xf;
    if (!test_bit(start_len, cpu->sve_vq_map)) {
        end_len = find_last_bit(cpu->sve_vq_map, start_len);
        assert(end_len < start_len);
    }
    return end_len;
}

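/*
 * Worked example (editor's illustration; the supported-length set is
 * hypothetical): if sve_vq_map has bits 0, 1 and 3 set and start_len is
 * 2, test_bit() fails, find_last_bit() scans bits [0, 2) and returns 1,
 * so the request is rounded down to the next smaller supported length.
 */
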
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }

    return sve_zcr_get_valid_len(cpu, zcr_len);
}

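/*
 * Worked example (editor's illustration; the register values are
 * hypothetical): with sve_max_vq = 4, ZCR_EL1 = 3 and ZCR_EL2 = 1 on a
 * CPU with EL2, a query for EL1 starts from zcr_len = 3 and is clamped
 * to MIN(3, 3) = 3 by ZCR_EL1 and then MIN(3, 1) = 1 by ZCR_EL2: the
 * effective length is the minimum across all controlling levels.
 */
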
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

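/*
 * Worked example (editor's illustration, not part of the original source):
 * with MASK == 0, BAS == 0x0c and a doubleword-aligned WVR, the code
 * above computes basstart = ctz32(0x0c) = 2 and len = cto32(0x3) = 2,
 * i.e. a 2-byte watchpoint at WVR + 2.  With MASK == 3 it would instead
 * watch an aligned 8-byte (1 << 3) region and ignore BAS entirely.
 */
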
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

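/*
 * Worked example (editor's illustration, not part of the original source):
 * sextract64(value, 0, 49) sign-extends bit 48, so a guest write of
 * 0x0001000000000007 is stored as 0xffff000000000004: bits [63:49] take
 * the value of bit 48 and the RES0 bits [1:0] are cleared by ~3ULL.
 */
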
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

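/*
 * Worked example (editor's illustration, not part of the original source):
 * a guest write with BAS == 0b0110 is stored as BAS == 0b1100: bit 6
 * (BAS[1]) is overwritten with bit 5 (BAS[0]) and bit 8 (BAS[3]) with
 * bit 7 (BAS[2]), yielding one of the four values hw_breakpoint_update
 * relies on.
 */
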
50300698 5746static void define_debug_regs(ARMCPU *cpu)
0b45451e 5747{
50300698
PM
5748 /* Define v7 and v8 architectural debug registers.
5749 * These are just dummy implementations for now.
0b45451e
PM
5750 */
5751 int i;
3ff6fc91 5752 int wrps, brps, ctx_cmps;
48eb3ae6
PM
5753 ARMCPRegInfo dbgdidr = {
5754 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
d6c8cf81
PM
5755 .access = PL0_R, .accessfn = access_tda,
5756 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
48eb3ae6
PM
5757 };
5758
3ff6fc91 5759 /* Note that all these register fields hold "number of Xs minus 1". */
48eb3ae6
PM
5760 brps = extract32(cpu->dbgdidr, 24, 4);
5761 wrps = extract32(cpu->dbgdidr, 28, 4);
3ff6fc91
PM
5762 ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
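    /* So e.g. brps == 5 means six breakpoint register pairs are
     * implemented, which is why the define loops below run to
     * brps + 1 and wrps + 1.
     */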
5763
5764 assert(ctx_cmps <= brps);
48eb3ae6
PM
5765
5766 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
5767 * of the debug registers, such as the number of breakpoints;
5768 * check that if they both exist then they agree.
5769 */
5770 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
5771 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
5772 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
3ff6fc91 5773 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
48eb3ae6 5774 }
0b45451e 5775
48eb3ae6 5776 define_one_arm_cp_reg(cpu, &dbgdidr);
50300698
PM
5777 define_arm_cp_regs(cpu, debug_cp_reginfo);
5778
5779 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
5780 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
5781 }
5782
48eb3ae6 5783 for (i = 0; i < brps + 1; i++) {
0b45451e 5784 ARMCPRegInfo dbgregs[] = {
10aae104
PM
5785 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
5786 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
d6c8cf81 5787 .access = PL1_RW, .accessfn = access_tda,
46747d15
PM
5788 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
5789 .writefn = dbgbvr_write, .raw_writefn = raw_write
5790 },
10aae104
PM
5791 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
5792 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
d6c8cf81 5793 .access = PL1_RW, .accessfn = access_tda,
46747d15
PM
5794 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
5795 .writefn = dbgbcr_write, .raw_writefn = raw_write
5796 },
48eb3ae6
PM
5797 REGINFO_SENTINEL
5798 };
5799 define_arm_cp_regs(cpu, dbgregs);
5800 }
5801
5802 for (i = 0; i < wrps + 1; i++) {
5803 ARMCPRegInfo dbgregs[] = {
10aae104
PM
5804 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
5805 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
d6c8cf81 5806 .access = PL1_RW, .accessfn = access_tda,
9ee98ce8
PM
5807 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
5808 .writefn = dbgwvr_write, .raw_writefn = raw_write
5809 },
10aae104
PM
5810 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
5811 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
d6c8cf81 5812 .access = PL1_RW, .accessfn = access_tda,
9ee98ce8
PM
5813 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
5814 .writefn = dbgwcr_write, .raw_writefn = raw_write
5815 },
5816 REGINFO_SENTINEL
0b45451e
PM
5817 };
5818 define_arm_cp_regs(cpu, dbgregs);
5819 }
5820}
5821
96a8b92e
PM
5822/* We don't know until after realize whether there's a GICv3
5823 * attached, and that is what registers the gicv3 sysregs.
5824 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1
5825 * at runtime.
5826 */
5827static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5828{
2fc0cc0e 5829 ARMCPU *cpu = env_archcpu(env);
96a8b92e
PM
5830 uint64_t pfr1 = cpu->id_pfr1;
5831
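    /* ID_PFR1.GIC is bits [31:28]; the value 1 advertises a GICv3
     * CPU interface accessed via system registers.
     */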
5832 if (env->gicv3state) {
5833 pfr1 |= 1 << 28;
5834 }
5835 return pfr1;
5836}
5837
5838static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5839{
2fc0cc0e 5840 ARMCPU *cpu = env_archcpu(env);
47576b94 5841 uint64_t pfr0 = cpu->isar.id_aa64pfr0;
96a8b92e
PM
5842
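    /* ID_AA64PFR0_EL1.GIC is bits [27:24]; the value 1 advertises
     * the GICv3 system register interface.
     */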
5843 if (env->gicv3state) {
5844 pfr0 |= 1 << 24;
5845 }
5846 return pfr0;
5847}
5848
2d7137c1
RH
5849/* Shared logic between LORID and the rest of the LOR* registers.
5850 * Secure state has already been dealt with.
5851 */
5852static CPAccessResult access_lor_ns(CPUARMState *env)
5853{
5854 int el = arm_current_el(env);
5855
5856 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5857 return CP_ACCESS_TRAP_EL2;
5858 }
5859 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5860 return CP_ACCESS_TRAP_EL3;
5861 }
5862 return CP_ACCESS_OK;
5863}
5864
5865static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
5866 bool isread)
5867{
5868 if (arm_is_secure_below_el3(env)) {
5869 /* Access ok in secure mode. */
5870 return CP_ACCESS_OK;
5871 }
5872 return access_lor_ns(env);
5873}
5874
5875static CPAccessResult access_lor_other(CPUARMState *env,
5876 const ARMCPRegInfo *ri, bool isread)
5877{
5878 if (arm_is_secure_below_el3(env)) {
5879 /* Access denied in secure mode. */
5880 return CP_ACCESS_TRAP;
5881 }
5882 return access_lor_ns(env);
5883}
5884
967aa94f
RH
5885#ifdef TARGET_AARCH64
5886static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5887 bool isread)
5888{
5889 int el = arm_current_el(env);
5890
5891 if (el < 2 &&
5892 arm_feature(env, ARM_FEATURE_EL2) &&
5893 !(arm_hcr_el2_eff(env) & HCR_APK)) {
5894 return CP_ACCESS_TRAP_EL2;
5895 }
5896 if (el < 3 &&
5897 arm_feature(env, ARM_FEATURE_EL3) &&
5898 !(env->cp15.scr_el3 & SCR_APK)) {
5899 return CP_ACCESS_TRAP_EL3;
5900 }
5901 return CP_ACCESS_OK;
5902}
5903
5904static const ARMCPRegInfo pauth_reginfo[] = {
5905 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5906 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5907 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5908 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
967aa94f
RH
5909 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5910 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5911 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5912 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
967aa94f
RH
5913 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5914 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5915 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5916 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
967aa94f
RH
5917 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5918 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5919 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5920 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
967aa94f
RH
5921 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5922 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5923 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5924 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
967aa94f
RH
5925 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5926 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5927 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5928 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
967aa94f
RH
5929 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5930 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5931 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5932 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
967aa94f
RH
5933 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5934 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5935 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5936 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
967aa94f
RH
5937 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5938 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5939 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5940 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
967aa94f
RH
5941 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5942 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5943 .access = PL1_RW, .accessfn = access_pauth,
108b3ba8 5944 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
967aa94f
RH
5945 REGINFO_SENTINEL
5946};
de390645
RH
5947
5948static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
5949{
5950 Error *err = NULL;
5951 uint64_t ret;
5952
5953 /* Success sets NZCV = 0000. */
5954 env->NF = env->CF = env->VF = 0, env->ZF = 1;
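    /* (QEMU keeps Z inverted: the Z flag is set when env->ZF == 0,
     * so ZF = 1 here means Z clear, and the ZF = 0 in the failure
     * path below means Z set.)
     */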
5955
5956 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
5957 /*
5958 * ??? Failed, for unknown reasons in the crypto subsystem.
5959 * The best we can do is log the reason and return the
5960 * timed-out indication to the guest. We know of no reason to
5961 * expect this failure to be transient, so the guest may well
5962 * hang retrying the operation.
5963 */
5964 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
5965 ri->name, error_get_pretty(err));
5966 error_free(err);
5967
5968 env->ZF = 0; /* NZCV = 0100 */
5969 return 0;
5970 }
5971 return ret;
5972}
5973
5974/* We do not support re-seeding, so the two registers operate the same. */
5975static const ARMCPRegInfo rndr_reginfo[] = {
5976 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
5977 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5978 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
5979 .access = PL0_R, .readfn = rndr_readfn },
5980 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
5981 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5982 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
5983 .access = PL0_R, .readfn = rndr_readfn },
5984 REGINFO_SENTINEL
5985};
0d57b499
BM
5986
5987#ifndef CONFIG_USER_ONLY
5988static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
5989 uint64_t value)
5990{
5991 ARMCPU *cpu = env_archcpu(env);
5992 /* CTR_EL0 System register -> DminLine, bits [19:16] */
5993 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
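    /* DminLine is log2 of the line size in words, so e.g. a DminLine
     * of 4 gives 4 << 4 = 64 bytes, a common cache line size.
     */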
5994 uint64_t vaddr_in = (uint64_t) value;
5995 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
5996 void *haddr;
5997 int mem_idx = cpu_mmu_index(env, false);
5998
5999 /* This won't cross page boundaries */
6000 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
6001 if (haddr) {
6002
6003 ram_addr_t offset;
6004 MemoryRegion *mr;
6005
6006 /* RCU lock is already being held */
6007 mr = memory_region_from_host(haddr, &offset);
6008
6009 if (mr) {
6010 memory_region_do_writeback(mr, offset, dline_size);
6011 }
6012 }
6013}
6014
6015static const ARMCPRegInfo dcpop_reg[] = {
6016 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
6017 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
6018 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6019 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
6020 REGINFO_SENTINEL
6021};
6022
6023static const ARMCPRegInfo dcpodp_reg[] = {
6024 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
6025 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
6026 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6027 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
6028 REGINFO_SENTINEL
6029};
6030#endif /*CONFIG_USER_ONLY*/
6031
967aa94f
RH
6032#endif
6033
cb570bd3
RH
6034static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
6035 bool isread)
6036{
6037 int el = arm_current_el(env);
6038
6039 if (el == 0) {
6040 uint64_t sctlr = arm_sctlr(env, el);
6041 if (!(sctlr & SCTLR_EnRCTX)) {
6042 return CP_ACCESS_TRAP;
6043 }
6044 } else if (el == 1) {
6045 uint64_t hcr = arm_hcr_el2_eff(env);
6046 if (hcr & HCR_NV) {
6047 return CP_ACCESS_TRAP_EL2;
6048 }
6049 }
6050 return CP_ACCESS_OK;
6051}
6052
6053static const ARMCPRegInfo predinv_reginfo[] = {
6054 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
6055 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
6056 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6057 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
6058 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
6059 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6060 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
6061 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
6062 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6063 /*
6064 * Note the AArch32 opcodes have a different OPC1.
6065 */
6066 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
6067 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
6068 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6069 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
6070 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
6071 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6072 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
6073 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
6074 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
6075 REGINFO_SENTINEL
6076};
6077
6a4ef4e5
MZ
6078static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
6079 bool isread)
6080{
6081 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
6082 return CP_ACCESS_TRAP_EL2;
6083 }
6084
6085 return CP_ACCESS_OK;
6086}
6087
6088static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
6089 bool isread)
6090{
6091 if (arm_feature(env, ARM_FEATURE_V8)) {
6092 return access_aa64_tid3(env, ri, isread);
6093 }
6094
6095 return CP_ACCESS_OK;
6096}
6097
f96f3d5f
MZ
6098static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
6099 bool isread)
6100{
6101 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
6102 return CP_ACCESS_TRAP_EL2;
6103 }
6104
6105 return CP_ACCESS_OK;
6106}
6107
6108static const ARMCPRegInfo jazelle_regs[] = {
6109 { .name = "JIDR",
6110 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
6111 .access = PL1_R, .accessfn = access_jazelle,
6112 .type = ARM_CP_CONST, .resetvalue = 0 },
6113 { .name = "JOSCR",
6114 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
6115 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6116 { .name = "JMCR",
6117 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
6118 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6119 REGINFO_SENTINEL
6120};
6121
e2a1a461
RH
6122static const ARMCPRegInfo vhe_reginfo[] = {
6123 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
6124 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
6125 .access = PL2_RW,
6126 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
ed30da8e
RH
6127 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
6128 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
6129 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
6130 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
e2a1a461
RH
6131 REGINFO_SENTINEL
6132};
6133
2ceb98c0
PM
6134void register_cp_regs_for_features(ARMCPU *cpu)
6135{
6136 /* Register all the coprocessor registers based on feature bits */
6137 CPUARMState *env = &cpu->env;
6138 if (arm_feature(env, ARM_FEATURE_M)) {
6139 /* M profile has no coprocessor registers */
6140 return;
6141 }
6142
e9aa6c21 6143 define_arm_cp_regs(cpu, cp_reginfo);
9449fdf6
PM
6144 if (!arm_feature(env, ARM_FEATURE_V8)) {
6145 /* Must go early as it is full of wildcards that may be
6146 * overridden by later definitions.
6147 */
6148 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
6149 }
6150
7d57f408 6151 if (arm_feature(env, ARM_FEATURE_V6)) {
8515a092
PM
6152 /* The ID registers all have impdef reset values */
6153 ARMCPRegInfo v6_idregs[] = {
0ff644a7
PM
6154 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
6155 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6156 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6157 .accessfn = access_aa32_tid3,
8515a092 6158 .resetvalue = cpu->id_pfr0 },
96a8b92e
PM
6159 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
6160 * the value of the GIC field until after we define these regs.
6161 */
0ff644a7
PM
6162 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
6163 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
96a8b92e 6164 .access = PL1_R, .type = ARM_CP_NO_RAW,
6a4ef4e5 6165 .accessfn = access_aa32_tid3,
96a8b92e
PM
6166 .readfn = id_pfr1_read,
6167 .writefn = arm_cp_write_ignore },
0ff644a7
PM
6168 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
6169 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
6170 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6171 .accessfn = access_aa32_tid3,
8515a092 6172 .resetvalue = cpu->id_dfr0 },
0ff644a7
PM
6173 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
6174 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
6175 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6176 .accessfn = access_aa32_tid3,
8515a092 6177 .resetvalue = cpu->id_afr0 },
0ff644a7
PM
6178 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
6179 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
6180 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6181 .accessfn = access_aa32_tid3,
8515a092 6182 .resetvalue = cpu->id_mmfr0 },
0ff644a7
PM
6183 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
6184 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
6185 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6186 .accessfn = access_aa32_tid3,
8515a092 6187 .resetvalue = cpu->id_mmfr1 },
0ff644a7
PM
6188 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
6189 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
6190 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6191 .accessfn = access_aa32_tid3,
8515a092 6192 .resetvalue = cpu->id_mmfr2 },
0ff644a7
PM
6193 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
6194 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
6195 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6196 .accessfn = access_aa32_tid3,
8515a092 6197 .resetvalue = cpu->id_mmfr3 },
0ff644a7
PM
6198 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
6199 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6200 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6201 .accessfn = access_aa32_tid3,
47576b94 6202 .resetvalue = cpu->isar.id_isar0 },
0ff644a7
PM
6203 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
6204 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
6205 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6206 .accessfn = access_aa32_tid3,
47576b94 6207 .resetvalue = cpu->isar.id_isar1 },
0ff644a7
PM
6208 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
6209 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6210 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6211 .accessfn = access_aa32_tid3,
47576b94 6212 .resetvalue = cpu->isar.id_isar2 },
0ff644a7
PM
6213 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
6214 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
6215 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6216 .accessfn = access_aa32_tid3,
47576b94 6217 .resetvalue = cpu->isar.id_isar3 },
0ff644a7
PM
6218 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
6219 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
6220 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6221 .accessfn = access_aa32_tid3,
47576b94 6222 .resetvalue = cpu->isar.id_isar4 },
0ff644a7
PM
6223 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
6224 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
6225 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6226 .accessfn = access_aa32_tid3,
47576b94 6227 .resetvalue = cpu->isar.id_isar5 },
e20d84c1
PM
6228 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
6229 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
6230 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6231 .accessfn = access_aa32_tid3,
e20d84c1 6232 .resetvalue = cpu->id_mmfr4 },
802abf40 6233 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
e20d84c1
PM
6234 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
6235 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6236 .accessfn = access_aa32_tid3,
47576b94 6237 .resetvalue = cpu->isar.id_isar6 },
8515a092
PM
6238 REGINFO_SENTINEL
6239 };
6240 define_arm_cp_regs(cpu, v6_idregs);
7d57f408
PM
6241 define_arm_cp_regs(cpu, v6_cp_reginfo);
6242 } else {
6243 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
6244 }
4d31c596
PM
6245 if (arm_feature(env, ARM_FEATURE_V6K)) {
6246 define_arm_cp_regs(cpu, v6k_cp_reginfo);
6247 }
5e5cf9e3 6248 if (arm_feature(env, ARM_FEATURE_V7MP) &&
452a0955 6249 !arm_feature(env, ARM_FEATURE_PMSA)) {
995939a6
PM
6250 define_arm_cp_regs(cpu, v7mp_cp_reginfo);
6251 }
327dd510
AL
6252 if (arm_feature(env, ARM_FEATURE_V7VE)) {
6253 define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
6254 }
e9aa6c21 6255 if (arm_feature(env, ARM_FEATURE_V7)) {
200ac0ef 6256 /* v7 performance monitor control register: same implementor
ac689a2e
AL
6257 * field as main ID register, and we implement four counters in
6258 * addition to the cycle count register.
200ac0ef 6259 */
ac689a2e 6260 unsigned int i, pmcrn = 4;
200ac0ef
PM
6261 ARMCPRegInfo pmcr = {
6262 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
8521466b 6263 .access = PL0_RW,
7a0e58fa 6264 .type = ARM_CP_IO | ARM_CP_ALIAS,
8521466b 6265 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
fcd25206
PM
6266 .accessfn = pmreg_access, .writefn = pmcr_write,
6267 .raw_writefn = raw_write,
200ac0ef 6268 };
8521466b
AF
6269 ARMCPRegInfo pmcr64 = {
6270 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6271 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6272 .access = PL0_RW, .accessfn = pmreg_access,
6273 .type = ARM_CP_IO,
6274 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
ac689a2e 6275 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
8521466b
AF
6276 .writefn = pmcr_write, .raw_writefn = raw_write,
6277 };
7c2cb42b 6278 define_one_arm_cp_reg(cpu, &pmcr);
8521466b 6279 define_one_arm_cp_reg(cpu, &pmcr64);
5ecdd3e4
AL
6280 for (i = 0; i < pmcrn; i++) {
6281 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6282 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6283 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6284 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
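            /* PMEVCNTR<n>/PMEVTYPER<n> pack n into the encoding as
             * CRm = base + (n >> 3) and opc2 = n & 7, with base 8 for
             * the counters and 12 for the type registers; e.g.
             * PMEVCNTR11 lands at CRm = 9, opc2 = 3.
             */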
6285 ARMCPRegInfo pmev_regs[] = {
62c7ec34 6286 { .name = pmevcntr_name, .cp = 15, .crn = 14,
5ecdd3e4
AL
6287 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6288 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6289 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6290 .accessfn = pmreg_access },
6291 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
62c7ec34 6292 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
5ecdd3e4
AL
6293 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6294 .type = ARM_CP_IO,
6295 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6296 .raw_readfn = pmevcntr_rawread,
6297 .raw_writefn = pmevcntr_rawwrite },
62c7ec34 6298 { .name = pmevtyper_name, .cp = 15, .crn = 14,
5ecdd3e4
AL
6299 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6300 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6301 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6302 .accessfn = pmreg_access },
6303 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
62c7ec34 6304 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
5ecdd3e4
AL
6305 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6306 .type = ARM_CP_IO,
6307 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6308 .raw_writefn = pmevtyper_rawwrite },
6309 REGINFO_SENTINEL
6310 };
6311 define_arm_cp_regs(cpu, pmev_regs);
6312 g_free(pmevcntr_name);
6313 g_free(pmevcntr_el0_name);
6314 g_free(pmevtyper_name);
6315 g_free(pmevtyper_el0_name);
6316 }
776d4e5c 6317 ARMCPRegInfo clidr = {
7da845b0
PM
6318 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
6319 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
630fcd4d
MZ
6320 .access = PL1_R, .type = ARM_CP_CONST,
6321 .accessfn = access_aa64_tid2,
6322 .resetvalue = cpu->clidr
776d4e5c 6323 };
776d4e5c 6324 define_one_arm_cp_reg(cpu, &clidr);
e9aa6c21 6325 define_arm_cp_regs(cpu, v7_cp_reginfo);
50300698 6326 define_debug_regs(cpu);
7d57f408
PM
6327 } else {
6328 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
e9aa6c21 6329 }
cad86737
AL
6330 if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
6331 FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
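        /* A PerfMon field of 4 or more (but not the IMPDEF value 0xf)
         * indicates PMUv3 for ARMv8.1, which adds the PMCEID2/PMCEID3
         * views of the upper halves of PMCEID0/PMCEID1.
         */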
6332 ARMCPRegInfo v81_pmu_regs[] = {
6333 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6334 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6335 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6336 .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6337 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6338 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6339 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6340 .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6341 REGINFO_SENTINEL
6342 };
6343 define_arm_cp_regs(cpu, v81_pmu_regs);
6344 }
b0d2b7d0 6345 if (arm_feature(env, ARM_FEATURE_V8)) {
e20d84c1
PM
6346 /* AArch64 ID registers, which all have impdef reset values.
6347 * Note that within the ID register ranges the unused slots
6348 * must all be RAZ, not UNDEF; future architecture versions may
6349 * define new registers here.
6350 */
e60cef86 6351 ARMCPRegInfo v8_idregs[] = {
96a8b92e
PM
6352 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
6353 * know the right value for the GIC field until after we
6354 * define these regs.
6355 */
e60cef86
PM
6356 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
6357 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
96a8b92e 6358 .access = PL1_R, .type = ARM_CP_NO_RAW,
6a4ef4e5 6359 .accessfn = access_aa64_tid3,
96a8b92e
PM
6360 .readfn = id_aa64pfr0_read,
6361 .writefn = arm_cp_write_ignore },
e60cef86
PM
6362 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
6363 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
6364 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6365 .accessfn = access_aa64_tid3,
47576b94 6366 .resetvalue = cpu->isar.id_aa64pfr1},
e20d84c1
PM
6367 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6368 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
6369 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6370 .accessfn = access_aa64_tid3,
e20d84c1
PM
6371 .resetvalue = 0 },
6372 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6373 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
6374 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6375 .accessfn = access_aa64_tid3,
e20d84c1 6376 .resetvalue = 0 },
9516d772 6377 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
e20d84c1
PM
6378 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
6379 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6380 .accessfn = access_aa64_tid3,
9516d772 6381 /* At present, only SVEver == 0 is defined anyway. */
e20d84c1
PM
6382 .resetvalue = 0 },
6383 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6384 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
6385 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6386 .accessfn = access_aa64_tid3,
e20d84c1
PM
6387 .resetvalue = 0 },
6388 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6389 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
6390 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6391 .accessfn = access_aa64_tid3,
e20d84c1
PM
6392 .resetvalue = 0 },
6393 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6394 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6395 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6396 .accessfn = access_aa64_tid3,
e20d84c1 6397 .resetvalue = 0 },
e60cef86
PM
6398 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6399 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6400 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6401 .accessfn = access_aa64_tid3,
d6f02ce3 6402 .resetvalue = cpu->id_aa64dfr0 },
e60cef86
PM
6403 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6404 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6405 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6406 .accessfn = access_aa64_tid3,
e60cef86 6407 .resetvalue = cpu->id_aa64dfr1 },
e20d84c1
PM
6408 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6409 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6410 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6411 .accessfn = access_aa64_tid3,
e20d84c1
PM
6412 .resetvalue = 0 },
6413 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6414 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6415 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6416 .accessfn = access_aa64_tid3,
e20d84c1 6417 .resetvalue = 0 },
e60cef86
PM
6418 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6419 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6420 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6421 .accessfn = access_aa64_tid3,
e60cef86
PM
6422 .resetvalue = cpu->id_aa64afr0 },
6423 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6424 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6425 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6426 .accessfn = access_aa64_tid3,
e60cef86 6427 .resetvalue = cpu->id_aa64afr1 },
e20d84c1
PM
6428 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6429 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6430 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6431 .accessfn = access_aa64_tid3,
e20d84c1
PM
6432 .resetvalue = 0 },
6433 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6434 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6435 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6436 .accessfn = access_aa64_tid3,
e20d84c1 6437 .resetvalue = 0 },
e60cef86
PM
6438 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6439 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6440 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6441 .accessfn = access_aa64_tid3,
47576b94 6442 .resetvalue = cpu->isar.id_aa64isar0 },
e60cef86
PM
6443 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6444 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6445 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6446 .accessfn = access_aa64_tid3,
47576b94 6447 .resetvalue = cpu->isar.id_aa64isar1 },
e20d84c1
PM
6448 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6449 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6450 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6451 .accessfn = access_aa64_tid3,
e20d84c1
PM
6452 .resetvalue = 0 },
6453 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6454 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6455 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6456 .accessfn = access_aa64_tid3,
e20d84c1
PM
6457 .resetvalue = 0 },
6458 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6459 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6460 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6461 .accessfn = access_aa64_tid3,
e20d84c1
PM
6462 .resetvalue = 0 },
6463 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6464 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6465 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6466 .accessfn = access_aa64_tid3,
e20d84c1
PM
6467 .resetvalue = 0 },
6468 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6469 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6470 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6471 .accessfn = access_aa64_tid3,
e20d84c1
PM
6472 .resetvalue = 0 },
6473 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6474 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6475 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6476 .accessfn = access_aa64_tid3,
e20d84c1 6477 .resetvalue = 0 },
e60cef86
PM
6478 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6479 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6480 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6481 .accessfn = access_aa64_tid3,
3dc91ddb 6482 .resetvalue = cpu->isar.id_aa64mmfr0 },
e60cef86
PM
6483 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6484 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6485 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6486 .accessfn = access_aa64_tid3,
3dc91ddb 6487 .resetvalue = cpu->isar.id_aa64mmfr1 },
e20d84c1
PM
6488 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6489 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6490 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6491 .accessfn = access_aa64_tid3,
e20d84c1
PM
6492 .resetvalue = 0 },
6493 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6494 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6495 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6496 .accessfn = access_aa64_tid3,
e20d84c1
PM
6497 .resetvalue = 0 },
6498 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6499 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6500 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6501 .accessfn = access_aa64_tid3,
e20d84c1
PM
6502 .resetvalue = 0 },
6503 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6504 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6505 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6506 .accessfn = access_aa64_tid3,
e20d84c1
PM
6507 .resetvalue = 0 },
6508 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6509 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6510 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6511 .accessfn = access_aa64_tid3,
e20d84c1
PM
6512 .resetvalue = 0 },
6513 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6514 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6515 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6516 .accessfn = access_aa64_tid3,
e20d84c1 6517 .resetvalue = 0 },
a50c0f51
PM
6518 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6519 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6520 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6521 .accessfn = access_aa64_tid3,
47576b94 6522 .resetvalue = cpu->isar.mvfr0 },
a50c0f51
PM
6523 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6524 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6525 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6526 .accessfn = access_aa64_tid3,
47576b94 6527 .resetvalue = cpu->isar.mvfr1 },
a50c0f51
PM
6528 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6529 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6530 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6531 .accessfn = access_aa64_tid3,
47576b94 6532 .resetvalue = cpu->isar.mvfr2 },
e20d84c1
PM
6533 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6534 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6535 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6536 .accessfn = access_aa64_tid3,
e20d84c1
PM
6537 .resetvalue = 0 },
6538 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6539 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6540 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6541 .accessfn = access_aa64_tid3,
e20d84c1
PM
6542 .resetvalue = 0 },
6543 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6544 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6545 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6546 .accessfn = access_aa64_tid3,
e20d84c1
PM
6547 .resetvalue = 0 },
6548 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6549 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6550 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6551 .accessfn = access_aa64_tid3,
e20d84c1
PM
6552 .resetvalue = 0 },
6553 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6554 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6555 .access = PL1_R, .type = ARM_CP_CONST,
6a4ef4e5 6556 .accessfn = access_aa64_tid3,
e20d84c1 6557 .resetvalue = 0 },
4054bfa9
AF
6558 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
6559 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
6560 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
cad86737 6561 .resetvalue = extract64(cpu->pmceid0, 0, 32) },
4054bfa9
AF
6562 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
6563 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
6564 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6565 .resetvalue = cpu->pmceid0 },
6566 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
6567 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
6568 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
cad86737 6569 .resetvalue = extract64(cpu->pmceid1, 0, 32) },
4054bfa9
AF
6570 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
6571 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
6572 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6573 .resetvalue = cpu->pmceid1 },
e60cef86
PM
6574 REGINFO_SENTINEL
6575 };
6c5c0fec
AB
6576#ifdef CONFIG_USER_ONLY
6577 ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6578 { .name = "ID_AA64PFR0_EL1",
6579 .exported_bits = 0x000f000f00ff0000,
6580 .fixed_bits = 0x0000000000000011 },
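            /* The fixed_bits value reports EL0 and EL1 as AArch64-only,
             * while exported_bits exposes only the fields (FP, AdvSIMD,
             * SVE, ...) that are meaningful to user-mode code.
             */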
6581 { .name = "ID_AA64PFR1_EL1",
6582 .exported_bits = 0x00000000000000f0 },
d040242e
AB
6583 { .name = "ID_AA64PFR*_EL1_RESERVED",
6584 .is_glob = true },
6c5c0fec
AB
6585 { .name = "ID_AA64ZFR0_EL1" },
6586 { .name = "ID_AA64MMFR0_EL1",
6587 .fixed_bits = 0x00000000ff000000 },
6588 { .name = "ID_AA64MMFR1_EL1" },
d040242e
AB
6589 { .name = "ID_AA64MMFR*_EL1_RESERVED",
6590 .is_glob = true },
6c5c0fec
AB
6591 { .name = "ID_AA64DFR0_EL1",
6592 .fixed_bits = 0x0000000000000006 },
6593 { .name = "ID_AA64DFR1_EL1" },
d040242e
AB
6594 { .name = "ID_AA64DFR*_EL1_RESERVED",
6595 .is_glob = true },
6596 { .name = "ID_AA64AFR*",
6597 .is_glob = true },
6c5c0fec
AB
6598 { .name = "ID_AA64ISAR0_EL1",
6599 .exported_bits = 0x00fffffff0fffff0 },
6600 { .name = "ID_AA64ISAR1_EL1",
6601 .exported_bits = 0x000000f0ffffffff },
d040242e
AB
6602 { .name = "ID_AA64ISAR*_EL1_RESERVED",
6603 .is_glob = true },
6c5c0fec
AB
6604 REGUSERINFO_SENTINEL
6605 };
6606 modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6607#endif
be8e8128
GB
6608 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
6609 if (!arm_feature(env, ARM_FEATURE_EL3) &&
6610 !arm_feature(env, ARM_FEATURE_EL2)) {
6611 ARMCPRegInfo rvbar = {
6612 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
6613 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6614 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
6615 };
6616 define_one_arm_cp_reg(cpu, &rvbar);
6617 }
e60cef86 6618 define_arm_cp_regs(cpu, v8_idregs);
b0d2b7d0
PM
6619 define_arm_cp_regs(cpu, v8_cp_reginfo);
6620 }
3b685ba7 6621 if (arm_feature(env, ARM_FEATURE_EL2)) {
f0d574d6 6622 uint64_t vmpidr_def = mpidr_read_val(env);
731de9e6
EI
6623 ARMCPRegInfo vpidr_regs[] = {
6624 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6625 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6626 .access = PL2_RW, .accessfn = access_el3_aa32ns,
36476562
PM
6627 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
6628 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
731de9e6
EI
6629 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6630 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6631 .access = PL2_RW, .resetvalue = cpu->midr,
6632 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
f0d574d6
EI
6633 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6634 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6635 .access = PL2_RW, .accessfn = access_el3_aa32ns,
36476562
PM
6636 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
6637 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
f0d574d6
EI
6638 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6639 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6640 .access = PL2_RW,
6641 .resetvalue = vmpidr_def,
6642 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
731de9e6
EI
6643 REGINFO_SENTINEL
6644 };
6645 define_arm_cp_regs(cpu, vpidr_regs);
4771cd01 6646 define_arm_cp_regs(cpu, el2_cp_reginfo);
ce4afed8
PM
6647 if (arm_feature(env, ARM_FEATURE_V8)) {
6648 define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6649 }
be8e8128
GB
6650 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
6651 if (!arm_feature(env, ARM_FEATURE_EL3)) {
6652 ARMCPRegInfo rvbar = {
6653 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6654 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6655 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
6656 };
6657 define_one_arm_cp_reg(cpu, &rvbar);
6658 }
d42e3c26
EI
6659 } else {
6660 /* If EL2 is missing but higher ELs are enabled, we need to
6661 * register the no_el2 reginfos.
6662 */
6663 if (arm_feature(env, ARM_FEATURE_EL3)) {
f0d574d6
EI
6664 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
6665 * of MIDR_EL1 and MPIDR_EL1.
731de9e6
EI
6666 */
6667 ARMCPRegInfo vpidr_regs[] = {
6668 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6669 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6670 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6671 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
6672 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
f0d574d6
EI
6673 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6674 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6675 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6676 .type = ARM_CP_NO_RAW,
6677 .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
731de9e6
EI
6678 REGINFO_SENTINEL
6679 };
6680 define_arm_cp_regs(cpu, vpidr_regs);
4771cd01 6681 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
ce4afed8
PM
6682 if (arm_feature(env, ARM_FEATURE_V8)) {
6683 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
6684 }
d42e3c26 6685 }
3b685ba7 6686 }
81547d66 6687 if (arm_feature(env, ARM_FEATURE_EL3)) {
0f1a3b24 6688 define_arm_cp_regs(cpu, el3_cp_reginfo);
e24fdd23
PM
6689 ARMCPRegInfo el3_regs[] = {
6690 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6691 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6692 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
6693 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6694 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6695 .access = PL3_RW,
6696 .raw_writefn = raw_write, .writefn = sctlr_write,
6697 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6698 .resetvalue = cpu->reset_sctlr },
6699 REGINFO_SENTINEL
be8e8128 6700 };
e24fdd23
PM
6701
6702 define_arm_cp_regs(cpu, el3_regs);
81547d66 6703 }
2f027fc5
PM
6704 /* The behaviour of NSACR is sufficiently various that we don't
6705 * try to describe it in a single reginfo:
6706 * - if EL3 is 64 bit, then trap to EL3 from S EL1,
6707 * reads as constant 0xc00 from NS EL1 and NS EL2
6708 * - if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6709 * - if v7 without EL3, the register doesn't exist
6710 * - if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6711 */
6712 if (arm_feature(env, ARM_FEATURE_EL3)) {
6713 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6714 ARMCPRegInfo nsacr = {
6715 .name = "NSACR", .type = ARM_CP_CONST,
6716 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6717 .access = PL1_RW, .accessfn = nsacr_access,
6718 .resetvalue = 0xc00
6719 };
6720 define_one_arm_cp_reg(cpu, &nsacr);
6721 } else {
6722 ARMCPRegInfo nsacr = {
6723 .name = "NSACR",
6724 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6725 .access = PL3_RW | PL1_R,
6726 .resetvalue = 0,
6727 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6728 };
6729 define_one_arm_cp_reg(cpu, &nsacr);
6730 }
6731 } else {
6732 if (arm_feature(env, ARM_FEATURE_V8)) {
6733 ARMCPRegInfo nsacr = {
6734 .name = "NSACR", .type = ARM_CP_CONST,
6735 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6736 .access = PL1_R,
6737 .resetvalue = 0xc00
6738 };
6739 define_one_arm_cp_reg(cpu, &nsacr);
6740 }
6741 }
6742
452a0955 6743 if (arm_feature(env, ARM_FEATURE_PMSA)) {
6cb0b013
PC
6744 if (arm_feature(env, ARM_FEATURE_V6)) {
6745 /* PMSAv6 not implemented */
6746 assert(arm_feature(env, ARM_FEATURE_V7));
6747 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6748 define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6749 } else {
6750 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6751 }
18032bec 6752 } else {
8e5d75c9 6753 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
18032bec 6754 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
ab638a32
RH
6755 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
6756 if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
6757 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6758 }
18032bec 6759 }
c326b979
PM
6760 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6761 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6762 }
6cc7a3ae
PM
6763 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6764 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6765 }
4a501606
PM
6766 if (arm_feature(env, ARM_FEATURE_VAPA)) {
6767 define_arm_cp_regs(cpu, vapa_cp_reginfo);
6768 }
c4804214
PM
6769 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
6770 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
6771 }
6772 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
6773 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
6774 }
6775 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
6776 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
6777 }
18032bec
PM
6778 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
6779 define_arm_cp_regs(cpu, omap_cp_reginfo);
6780 }
34f90529
PM
6781 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
6782 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
6783 }
1047b9d7
PM
6784 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6785 define_arm_cp_regs(cpu, xscale_cp_reginfo);
6786 }
6787 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
6788 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
6789 }
7ac681cf
PM
6790 if (arm_feature(env, ARM_FEATURE_LPAE)) {
6791 define_arm_cp_regs(cpu, lpae_cp_reginfo);
6792 }
f96f3d5f
MZ
6793 if (cpu_isar_feature(jazelle, cpu)) {
6794 define_arm_cp_regs(cpu, jazelle_regs);
6795 }
7884849c
PM
6796 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
6797 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
6798 * be read-only (i.e. a write causes an UNDEF exception).
6799 */
6800 {
00a29f3d
PM
6801 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
6802 /* Pre-v8 MIDR space.
6803 * Note that the MIDR isn't a simple constant register because
7884849c
PM
6804 * of the TI925 behaviour where writes to another register can
6805 * cause the MIDR value to change.
97ce8d61
PC
6806 *
6807 * Unimplemented registers in the c15 0 0 0 space default to
6808 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
6809 * and friends override accordingly.
7884849c
PM
6810 */
6811 { .name = "MIDR",
97ce8d61 6812 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7884849c 6813 .access = PL1_R, .resetvalue = cpu->midr,
d4e6df63 6814 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
731de9e6 6815 .readfn = midr_read,
97ce8d61
PC
6816 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6817 .type = ARM_CP_OVERRIDE },
7884849c
PM
6818 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
6819 { .name = "DUMMY",
6820 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
6821 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6822 { .name = "DUMMY",
6823 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
6824 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6825 { .name = "DUMMY",
6826 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
6827 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6828 { .name = "DUMMY",
6829 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
6830 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6831 { .name = "DUMMY",
6832 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
6833 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6834 REGINFO_SENTINEL
6835 };
00a29f3d 6836 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
00a29f3d
PM
6837 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
6838 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
731de9e6
EI
6839 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
6840 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6841 .readfn = midr_read },
ac00c79f
SF
6842 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
6843 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6844 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6845 .access = PL1_R, .resetvalue = cpu->midr },
6846 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6847 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
6848 .access = PL1_R, .resetvalue = cpu->midr },
00a29f3d
PM
6849 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
6850 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
93fbc983
MZ
6851 .access = PL1_R,
6852 .accessfn = access_aa64_tid1,
6853 .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
00a29f3d
PM
6854 REGINFO_SENTINEL
6855 };
6856 ARMCPRegInfo id_cp_reginfo[] = {
6857 /* These are common to v8 and pre-v8 */
6858 { .name = "CTR",
6859 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
630fcd4d
MZ
6860 .access = PL1_R, .accessfn = ctr_el0_access,
6861 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
00a29f3d
PM
6862 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
6863 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
6864 .access = PL0_R, .accessfn = ctr_el0_access,
6865 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6866 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
6867 { .name = "TCMTR",
6868 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
93fbc983
MZ
6869 .access = PL1_R,
6870 .accessfn = access_aa32_tid1,
6871 .type = ARM_CP_CONST, .resetvalue = 0 },
00a29f3d
PM
6872 REGINFO_SENTINEL
6873 };
8085ce63
PC
6874 /* TLBTR is specific to VMSA */
6875 ARMCPRegInfo id_tlbtr_reginfo = {
6876 .name = "TLBTR",
6877 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
93fbc983
MZ
6878 .access = PL1_R,
6879 .accessfn = access_aa32_tid1,
6880 .type = ARM_CP_CONST, .resetvalue = 0,
8085ce63 6881 };
3281af81
PC
6882 /* MPUIR is specific to PMSA V6+ */
6883 ARMCPRegInfo id_mpuir_reginfo = {
6884 .name = "MPUIR",
6885 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6886 .access = PL1_R, .type = ARM_CP_CONST,
6887 .resetvalue = cpu->pmsav7_dregion << 8
6888 };
7884849c
PM
6889 ARMCPRegInfo crn0_wi_reginfo = {
6890 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
6891 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
6892 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
6893 };
6c5c0fec
AB
6894#ifdef CONFIG_USER_ONLY
6895 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
6896 { .name = "MIDR_EL1",
6897 .exported_bits = 0x00000000ffffffff },
6898 { .name = "REVIDR_EL1" },
6899 REGUSERINFO_SENTINEL
6900 };
6901 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
6902#endif
7884849c
PM
6903 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
6904 arm_feature(env, ARM_FEATURE_STRONGARM)) {
6905 ARMCPRegInfo *r;
6906 /* Register the blanket "writes ignored" value first to cover the
a703eda1
PC
6907 * whole space. Then update the specific ID registers to allow write
6908 * access, so that they ignore writes rather than causing them to
6909 * UNDEF.
7884849c
PM
6910 */
6911 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
00a29f3d
PM
6912 for (r = id_pre_v8_midr_cp_reginfo;
6913 r->type != ARM_CP_SENTINEL; r++) {
6914 r->access = PL1_RW;
6915 }
7884849c
PM
6916 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
6917 r->access = PL1_RW;
7884849c 6918 }
10006112 6919 id_mpuir_reginfo.access = PL1_RW;
3281af81 6920 id_tlbtr_reginfo.access = PL1_RW;
7884849c 6921 }
00a29f3d
PM
6922 if (arm_feature(env, ARM_FEATURE_V8)) {
6923 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
6924 } else {
6925 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
6926 }
a703eda1 6927 define_arm_cp_regs(cpu, id_cp_reginfo);
452a0955 6928 if (!arm_feature(env, ARM_FEATURE_PMSA)) {
8085ce63 6929 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
3281af81
PC
6930 } else if (arm_feature(env, ARM_FEATURE_V7)) {
6931 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
8085ce63 6932 }
7884849c
PM
6933 }
6934
97ce8d61 6935 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
52264166
AB
6936 ARMCPRegInfo mpidr_cp_reginfo[] = {
6937 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
6938 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
6939 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
6940 REGINFO_SENTINEL
6941 };
6942#ifdef CONFIG_USER_ONLY
6943 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
6944 { .name = "MPIDR_EL1",
6945 .fixed_bits = 0x0000000080000000 },
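            /* Bit 31 of MPIDR_EL1 is RES1 (the modern multiprocessor
             * format), so it is presented as a fixed bit to user-only
             * guests.
             */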
6946 REGUSERINFO_SENTINEL
6947 };
6948 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
6949#endif
97ce8d61
PC
6950 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
6951 }
6952
2771db27 6953 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
834a6c69
PM
6954 ARMCPRegInfo auxcr_reginfo[] = {
6955 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
6956 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
6957 .access = PL1_RW, .type = ARM_CP_CONST,
6958 .resetvalue = cpu->reset_auxcr },
6959 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
6960 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
6961 .access = PL2_RW, .type = ARM_CP_CONST,
6962 .resetvalue = 0 },
6963 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
6964 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
6965 .access = PL3_RW, .type = ARM_CP_CONST,
6966 .resetvalue = 0 },
6967 REGINFO_SENTINEL
2771db27 6968 };
834a6c69 6969 define_arm_cp_regs(cpu, auxcr_reginfo);
0e0456ab
PM
6970 if (arm_feature(env, ARM_FEATURE_V8)) {
6971 /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
6972 ARMCPRegInfo hactlr2_reginfo = {
6973 .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
6974 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
6975 .access = PL2_RW, .type = ARM_CP_CONST,
6976 .resetvalue = 0
6977 };
6978 define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
6979 }
2771db27
PM
6980 }
6981
d8ba780b 6982 if (arm_feature(env, ARM_FEATURE_CBAR)) {
d56974af
LM
6983 /*
6984 * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
6985 * There are two flavours:
6986 * (1) older 32-bit only cores have a simple 32-bit CBAR
6987 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
6988 * 32-bit register visible to AArch32 at a different encoding
6989 * to the "flavour 1" register and with the bits rearranged to
6990 * be able to squash a 64-bit address into the 32-bit view.
6991 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
6992 * in future if we support AArch32-only configs of some of the
6993 * AArch64 cores we might need to add a specific feature flag
6994 * to indicate cores with "flavour 2" CBAR.
6995 */
f318cec6
PM
6996 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6997 /* 32 bit view is [31:18] 0...0 [43:32]. */
6998 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
6999 | extract64(cpu->reset_cbar, 32, 12);
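            /* e.g. a 44-bit reset_cbar of 0x8_2C00_0000 yields a
             * 32-bit view of 0x2C00_0008: bits [31:18] stay in place
             * and bits [43:32] move into the low 12 bits.
             */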
7000 ARMCPRegInfo cbar_reginfo[] = {
7001 { .name = "CBAR",
7002 .type = ARM_CP_CONST,
d56974af
LM
7003 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
7004 .access = PL1_R, .resetvalue = cbar32 },
f318cec6
PM
7005 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
7006 .type = ARM_CP_CONST,
7007 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
d56974af 7008 .access = PL1_R, .resetvalue = cpu->reset_cbar },
f318cec6
PM
7009 REGINFO_SENTINEL
7010 };
7011 /* We don't implement a r/w 64 bit CBAR currently */
7012 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
7013 define_arm_cp_regs(cpu, cbar_reginfo);
7014 } else {
7015 ARMCPRegInfo cbar = {
7016 .name = "CBAR",
7017 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
7018 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
7019 .fieldoffset = offsetof(CPUARMState,
7020 cp15.c15_config_base_address)
7021 };
7022 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
7023 cbar.access = PL1_R;
7024 cbar.fieldoffset = 0;
7025 cbar.type = ARM_CP_CONST;
7026 }
7027 define_one_arm_cp_reg(cpu, &cbar);
7028 }
d8ba780b
PC
7029 }
7030
91db4642
CLG
7031 if (arm_feature(env, ARM_FEATURE_VBAR)) {
7032 ARMCPRegInfo vbar_cp_reginfo[] = {
7033 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
7034 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
7035 .access = PL1_RW, .writefn = vbar_write,
7036 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
7037 offsetof(CPUARMState, cp15.vbar_ns) },
7038 .resetvalue = 0 },
7039 REGINFO_SENTINEL
7040 };
7041 define_arm_cp_regs(cpu, vbar_cp_reginfo);
7042 }
7043
2771db27
PM
7044 /* Generic registers whose values depend on the implementation */
7045 {
7046 ARMCPRegInfo sctlr = {
5ebafdf3 7047 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
137feaa9
FA
7048 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
7049 .access = PL1_RW,
7050 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
7051 offsetof(CPUARMState, cp15.sctlr_ns) },
d4e6df63
PM
7052 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
7053 .raw_writefn = raw_write,
2771db27
PM
7054 };
7055 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7056 /* Normally we would always end the TB on an SCTLR write, but Linux
7057 * arch/arm/mach-pxa/sleep.S expects two instructions following
7058 * an MMU enable to execute from cache. Imitate this behaviour.
7059 */
7060 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
7061 }
7062 define_one_arm_cp_reg(cpu, &sctlr);
7063 }
5be5e8ed 7064
2d7137c1
RH
7065 if (cpu_isar_feature(aa64_lor, cpu)) {
7066 /*
7067 * A trivial implementation of ARMv8.1-LOR leaves all of these
7068 * registers fixed at 0, which indicates that there are zero
7069 * supported Limited Ordering regions.
7070 */
7071 static const ARMCPRegInfo lor_reginfo[] = {
7072 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
7073 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
7074 .access = PL1_RW, .accessfn = access_lor_other,
7075 .type = ARM_CP_CONST, .resetvalue = 0 },
7076 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
7077 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
7078 .access = PL1_RW, .accessfn = access_lor_other,
7079 .type = ARM_CP_CONST, .resetvalue = 0 },
7080 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
7081 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
7082 .access = PL1_RW, .accessfn = access_lor_other,
7083 .type = ARM_CP_CONST, .resetvalue = 0 },
7084 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
7085 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
7086 .access = PL1_RW, .accessfn = access_lor_other,
7087 .type = ARM_CP_CONST, .resetvalue = 0 },
7088 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
7089 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
7090 .access = PL1_R, .accessfn = access_lorid,
7091 .type = ARM_CP_CONST, .resetvalue = 0 },
7092 REGINFO_SENTINEL
7093 };
7094 define_arm_cp_regs(cpu, lor_reginfo);
7095 }
7096
e2a1a461
RH
7097 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
7098 define_arm_cp_regs(cpu, vhe_reginfo);
7099 }
7100
cd208a1c 7101 if (cpu_isar_feature(aa64_sve, cpu)) {
5be5e8ed
RH
7102 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
7103 if (arm_feature(env, ARM_FEATURE_EL2)) {
7104 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
7105 } else {
7106 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
7107 }
7108 if (arm_feature(env, ARM_FEATURE_EL3)) {
7109 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
7110 }
7111 }
967aa94f
RH
7112
7113#ifdef TARGET_AARCH64
7114 if (cpu_isar_feature(aa64_pauth, cpu)) {
7115 define_arm_cp_regs(cpu, pauth_reginfo);
7116 }
de390645
RH
7117 if (cpu_isar_feature(aa64_rndr, cpu)) {
7118 define_arm_cp_regs(cpu, rndr_reginfo);
7119 }
0d57b499
BM
7120#ifndef CONFIG_USER_ONLY
7121 /* Data Cache clean instructions up to PoP */
7122 if (cpu_isar_feature(aa64_dcpop, cpu)) {
7123 define_one_arm_cp_reg(cpu, dcpop_reg);
7124
7125 if (cpu_isar_feature(aa64_dcpodp, cpu)) {
7126 define_one_arm_cp_reg(cpu, dcpodp_reg);
7127 }
7128 }
7129#endif /*CONFIG_USER_ONLY*/
967aa94f 7130#endif
cb570bd3
RH
7131
7132 /*
7133 * While all v8.0 cpus support aarch64, QEMU does have configurations
7134 * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
7135 * which will set ID_ISAR6.
7136 */
7137 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
7138 ? cpu_isar_feature(aa64_predinv, cpu)
7139 : cpu_isar_feature(aa32_predinv, cpu)) {
7140 define_arm_cp_regs(cpu, predinv_reginfo);
7141 }
2ceb98c0
PM
7142}
7143
14969266
AF
7144void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
7145{
22169d41 7146 CPUState *cs = CPU(cpu);
14969266
AF
7147 CPUARMState *env = &cpu->env;
7148
6a669427
PM
7149 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7150 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
7151 aarch64_fpu_gdb_set_reg,
7152 34, "aarch64-fpu.xml", 0);
7153 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
22169d41 7154 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
56aebc89
PB
7155 51, "arm-neon.xml", 0);
7156 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
22169d41 7157 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
56aebc89
PB
7158 35, "arm-vfp3.xml", 0);
7159 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
22169d41 7160 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
56aebc89
PB
7161 19, "arm-vfp.xml", 0);
7162 }
200bf5b7
AB
7163 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
7164 arm_gen_dynamic_xml(cs),
7165 "system-registers.xml", 0);
40f137e1
PB
7166}
7167
777dc784
PM
7168/* Sort alphabetically by type name, except for "any". */
7169static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5adb4839 7170{
777dc784
PM
7171 ObjectClass *class_a = (ObjectClass *)a;
7172 ObjectClass *class_b = (ObjectClass *)b;
7173 const char *name_a, *name_b;
5adb4839 7174
777dc784
PM
7175 name_a = object_class_get_name(class_a);
7176 name_b = object_class_get_name(class_b);
51492fd1 7177 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
777dc784 7178 return 1;
51492fd1 7179 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
777dc784
PM
7180 return -1;
7181 } else {
7182 return strcmp(name_a, name_b);
5adb4839
PB
7183 }
7184}
7185
777dc784 7186static void arm_cpu_list_entry(gpointer data, gpointer user_data)
40f137e1 7187{
777dc784 7188 ObjectClass *oc = data;
51492fd1
AF
7189 const char *typename;
7190 char *name;
3371d272 7191
51492fd1
AF
7192 typename = object_class_get_name(oc);
7193 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
0442428a 7194 qemu_printf(" %s\n", name);
51492fd1 7195 g_free(name);
777dc784
PM
7196}
7197
0442428a 7198void arm_cpu_list(void)
777dc784 7199{
777dc784
PM
7200 GSList *list;
7201
7202 list = object_class_get_list(TYPE_ARM_CPU, false);
7203 list = g_slist_sort(list, arm_cpu_list_compare);
0442428a
MA
7204 qemu_printf("Available CPUs:\n");
7205 g_slist_foreach(list, arm_cpu_list_entry, NULL);
777dc784 7206 g_slist_free(list);
40f137e1
PB
7207}
7208
78027bb6
CR
7209static void arm_cpu_add_definition(gpointer data, gpointer user_data)
7210{
7211 ObjectClass *oc = data;
7212 CpuDefinitionInfoList **cpu_list = user_data;
7213 CpuDefinitionInfoList *entry;
7214 CpuDefinitionInfo *info;
7215 const char *typename;
7216
7217 typename = object_class_get_name(oc);
7218 info = g_malloc0(sizeof(*info));
7219 info->name = g_strndup(typename,
7220 strlen(typename) - strlen("-" TYPE_ARM_CPU));
8ed877b7 7221 info->q_typename = g_strdup(typename);
78027bb6
CR
7222
7223 entry = g_malloc0(sizeof(*entry));
7224 entry->value = info;
7225 entry->next = *cpu_list;
7226 *cpu_list = entry;
7227}
7228
25a9d6ca 7229CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
78027bb6
CR
7230{
7231 CpuDefinitionInfoList *cpu_list = NULL;
7232 GSList *list;
7233
7234 list = object_class_get_list(TYPE_ARM_CPU, false);
7235 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
7236 g_slist_free(list);
7237
7238 return cpu_list;
7239}
7240
6e6efd61 7241static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
51a79b03 7242 void *opaque, int state, int secstate,
9c513e78
AB
7243 int crm, int opc1, int opc2,
7244 const char *name)
6e6efd61
PM
7245{
7246 /* Private utility function for define_one_arm_cp_reg_with_opaque():
7247 * add a single reginfo struct to the hash table.
7248 */
7249 uint32_t *key = g_new(uint32_t, 1);
7250 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
7251 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
3f3c82a5
FA
7252 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
7253
9c513e78 7254 r2->name = g_strdup(name);
3f3c82a5
FA
7255 /* Reset the secure state to the specific incoming state. This is
7256 * necessary as the register may have been defined with both states.
7257 */
7258 r2->secure = secstate;
7259
7260 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
7261 /* Register is banked (using both entries in array).
7262 * Overwriting fieldoffset as the array is only used to define
7263 * banked registers but later only fieldoffset is used.
f5a0a5a5 7264 */
3f3c82a5
FA
7265 r2->fieldoffset = r->bank_fieldoffsets[ns];
7266 }
7267
7268 if (state == ARM_CP_STATE_AA32) {
7269 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
7270 /* If the register is banked then we don't need to migrate or
7271 * reset the 32-bit instance in certain cases:
7272 *
7273 * 1) If the register has both 32-bit and 64-bit instances then we
7274 * can count on the 64-bit instance taking care of the
7275 * non-secure bank.
7276 * 2) If ARMv8 is enabled then we can count on a 64-bit version
7277 * taking care of the secure bank. This requires that separate
7278 * 32 and 64-bit definitions are provided.
7279 */
7280 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
7281 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
7a0e58fa 7282 r2->type |= ARM_CP_ALIAS;
3f3c82a5
FA
7283 }
7284 } else if ((secstate != r->secure) && !ns) {
7285 /* The register is not banked so we only want to allow migration of
7286 * the non-secure instance.
7287 */
7a0e58fa 7288 r2->type |= ARM_CP_ALIAS;
58a1d8ce 7289 }
3f3c82a5
FA
7290
7291 if (r->state == ARM_CP_STATE_BOTH) {
7292 /* We assume it is a cp15 register if the .cp field is left unset.
7293 */
7294 if (r2->cp == 0) {
7295 r2->cp = 15;
7296 }
7297
f5a0a5a5 7298#ifdef HOST_WORDS_BIGENDIAN
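            /*
             * The AArch32 view of a STATE_BOTH register is the low half
             * of the 64-bit field; on a big-endian host that half lives
             * at the higher address, so bias the offset by 4 bytes.
             */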
3f3c82a5
FA
7299 if (r2->fieldoffset) {
7300 r2->fieldoffset += sizeof(uint32_t);
7301 }
f5a0a5a5 7302#endif
3f3c82a5 7303 }
f5a0a5a5
PM
7304 }
7305 if (state == ARM_CP_STATE_AA64) {
7306 /* To allow abbreviation of ARMCPRegInfo
7307 * definitions, we treat cp == 0 as equivalent to
7308 * the value for "standard guest-visible sysreg".
58a1d8ce
PM
7309 * STATE_BOTH definitions are also always "standard
7310 * sysreg" in their AArch64 view (the .cp value may
7311 * be non-zero for the benefit of the AArch32 view).
f5a0a5a5 7312 */
58a1d8ce 7313 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
f5a0a5a5
PM
7314 r2->cp = CP_REG_ARM64_SYSREG_CP;
7315 }
7316 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
7317 r2->opc0, opc1, opc2);
7318 } else {
51a79b03 7319 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
f5a0a5a5 7320 }
6e6efd61
PM
7321 if (opaque) {
7322 r2->opaque = opaque;
7323 }
67ed771d
PM
7324 /* reginfo passed to helpers is correct for the actual access,
7325 * and is never ARM_CP_STATE_BOTH:
7326 */
7327 r2->state = state;
6e6efd61
PM
7328 /* Make sure reginfo passed to helpers for wildcarded regs
7329 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
7330 */
7331 r2->crm = crm;
7332 r2->opc1 = opc1;
7333 r2->opc2 = opc2;
7334 /* By convention, for wildcarded registers only the first
7335 * entry is used for migration; the others are marked as
7a0e58fa 7336 * ALIAS so we don't try to transfer the register
6e6efd61 7337 * multiple times. Special registers (i.e. NOP/WFI) are
7a0e58fa 7338 * never migratable and not even raw-accessible.
6e6efd61 7339 */
7a0e58fa
PM
7340 if ((r->type & ARM_CP_SPECIAL)) {
7341 r2->type |= ARM_CP_NO_RAW;
7342 }
7343 if (((r->crm == CP_ANY) && crm != 0) ||
6e6efd61
PM
7344 ((r->opc1 == CP_ANY) && opc1 != 0) ||
7345 ((r->opc2 == CP_ANY) && opc2 != 0)) {
1f163787 7346 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
6e6efd61
PM
7347 }
7348
375421cc
PM
7349 /* Check that raw accesses are either forbidden or handled. Note that
7350 * we can't assert this earlier because the setup of fieldoffset for
7351 * banked registers has to be done first.
7352 */
7353 if (!(r2->type & ARM_CP_NO_RAW)) {
7354 assert(!raw_accessors_invalid(r2));
7355 }
7356
6e6efd61
PM
7357 /* Overriding of an existing definition must be explicitly
7358 * requested.
7359 */
7360 if (!(r->type & ARM_CP_OVERRIDE)) {
7361 ARMCPRegInfo *oldreg;
7362 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
7363 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
7364 fprintf(stderr, "Register redefined: cp=%d %d bit "
7365 "crn=%d crm=%d opc1=%d opc2=%d, "
7366 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
7367 r2->crn, r2->crm, r2->opc1, r2->opc2,
7368 oldreg->name, r2->name);
7369 g_assert_not_reached();
7370 }
7371 }
7372 g_hash_table_insert(cpu->cp_regs, key, r2);
7373}
7374
7375
4b6a83fb
PM
7376void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
7377 const ARMCPRegInfo *r, void *opaque)
7378{
7379 /* Define implementations of coprocessor registers.
7380 * We store these in a hashtable because typically
7381 * there are fewer than 150 registers in a space which
7382 * is 16*16*16*8*8 = 262144 in size.
7383 * Wildcarding is supported for the crm, opc1 and opc2 fields.
7384 * If a register is defined twice then the second definition is
7385 * used, so this can be used to define some generic registers and
7386 * then override them with implementation specific variations.
7387 * At least one of the original and the second definition should
7388 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
7389 * against accidental use.
f5a0a5a5
PM
7390 *
7391 * The state field defines whether the register is to be
7392 * visible in the AArch32 or AArch64 execution state. If the
7393 * state is set to ARM_CP_STATE_BOTH then we synthesise a
7394 * reginfo structure for the AArch32 view, which sees the lower
7395 * 32 bits of the 64 bit register.
7396 *
7397 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
7398 * be wildcarded. AArch64 registers are always considered to be 64
7399 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
7400 * the register, if any.
4b6a83fb 7401 */
f5a0a5a5 7402 int crm, opc1, opc2, state;
4b6a83fb
PM
7403 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
7404 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
7405 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
7406 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
7407 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
7408 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
7409 /* 64 bit registers have only CRm and Opc1 fields */
7410 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
f5a0a5a5
PM
7411 /* op0 only exists in the AArch64 encodings */
7412 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
7413 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
7414 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
7415 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
7416 * encodes a minimum access level for the register. We roll this
7417 * runtime check into our general permission check code, so check
7418 * here that the reginfo's specified permissions are strict enough
7419 * to encompass the generic architectural permission check.
7420 */
7421 if (r->state != ARM_CP_STATE_AA32) {
7422 int mask = 0;
7423 switch (r->opc1) {
b5bd7440
AB
7424 case 0:
7425 /* min_EL EL1, but some accessible to EL0 via kernel ABI */
7426 mask = PL0U_R | PL1_RW;
7427 break;
7428 case 1: case 2:
f5a0a5a5
PM
7429 /* min_EL EL1 */
7430 mask = PL1_RW;
7431 break;
7432 case 3:
7433 /* min_EL EL0 */
7434 mask = PL0_RW;
7435 break;
7436 case 4:
7437 /* min_EL EL2 */
7438 mask = PL2_RW;
7439 break;
7440 case 5:
7441 /* unallocated encoding, so not possible */
7442 assert(false);
7443 break;
7444 case 6:
7445 /* min_EL EL3 */
7446 mask = PL3_RW;
7447 break;
7448 case 7:
7449 /* min_EL EL1, secure mode only (we don't check the latter) */
7450 mask = PL1_RW;
7451 break;
7452 default:
7453 /* broken reginfo with out-of-range opc1 */
7454 assert(false);
7455 break;
7456 }
7457 /* assert our permissions are not too lax (stricter is fine) */
7458 assert((r->access & ~mask) == 0);
7459 }
7460
4b6a83fb
PM
7461 /* Check that the register definition has enough info to handle
7462 * reads and writes if they are permitted.
7463 */
7464 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
7465 if (r->access & PL3_R) {
3f3c82a5
FA
7466 assert((r->fieldoffset ||
7467 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7468 r->readfn);
4b6a83fb
PM
7469 }
7470 if (r->access & PL3_W) {
3f3c82a5
FA
7471 assert((r->fieldoffset ||
7472 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7473 r->writefn);
4b6a83fb
PM
7474 }
7475 }
7476 /* Bad type field probably means missing sentinel at end of reg list */
7477 assert(cptype_valid(r->type));
7478 for (crm = crmmin; crm <= crmmax; crm++) {
7479 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
7480 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
f5a0a5a5
PM
7481 for (state = ARM_CP_STATE_AA32;
7482 state <= ARM_CP_STATE_AA64; state++) {
7483 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
7484 continue;
7485 }
3f3c82a5
FA
7486 if (state == ARM_CP_STATE_AA32) {
7487 /* Under AArch32 CP registers can be common
7488 * (same for secure and non-secure world) or banked.
7489 */
9c513e78
AB
7490 char *name;
7491
3f3c82a5
FA
7492 switch (r->secure) {
7493 case ARM_CP_SECSTATE_S:
7494 case ARM_CP_SECSTATE_NS:
7495 add_cpreg_to_hashtable(cpu, r, opaque, state,
9c513e78
AB
7496 r->secure, crm, opc1, opc2,
7497 r->name);
3f3c82a5
FA
7498 break;
7499 default:
9c513e78 7500 name = g_strdup_printf("%s_S", r->name);
3f3c82a5
FA
7501 add_cpreg_to_hashtable(cpu, r, opaque, state,
7502 ARM_CP_SECSTATE_S,
9c513e78
AB
7503 crm, opc1, opc2, name);
7504 g_free(name);
3f3c82a5
FA
7505 add_cpreg_to_hashtable(cpu, r, opaque, state,
7506 ARM_CP_SECSTATE_NS,
9c513e78 7507 crm, opc1, opc2, r->name);
3f3c82a5
FA
7508 break;
7509 }
7510 } else {
7511 /* AArch64 registers get mapped to the non-secure
7512 * instance of AArch32. */
7513 add_cpreg_to_hashtable(cpu, r, opaque, state,
7514 ARM_CP_SECSTATE_NS,
9c513e78 7515 crm, opc1, opc2, r->name);
3f3c82a5 7516 }
f5a0a5a5 7517 }
4b6a83fb
PM
7518 }
7519 }
7520 }
7521}
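/*
 * Usage sketch (hypothetical register "FOOCTL"; the encoding is
 * invented and matches no real system register):
 *
 *     static const ARMCPRegInfo fooctl_reginfo = {
 *         .name = "FOOCTL", .state = ARM_CP_STATE_AA64,
 *         .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0,
 *         .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
 *     };
 *     define_one_arm_cp_reg(cpu, &fooctl_reginfo);
 *
 * A CP_ANY in crm/opc1/opc2 would instead expand into one hash-table
 * entry per concrete encoding covered by the wildcard.
 */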
7522
7523void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
7524 const ARMCPRegInfo *regs, void *opaque)
7525{
7526 /* Define a whole list of registers */
7527 const ARMCPRegInfo *r;
7528 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7529 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
7530 }
7531}
7532
6c5c0fec
AB
7533/*
7534 * Modify ARMCPRegInfo for access from userspace.
7535 *
7536 * This is a data driven modification directed by
7537 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
7538 * user-space cannot alter any values and dynamic values pertaining to
7539 * execution state are hidden from user space view anyway.
7540 */
7541void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
7542{
7543 const ARMCPRegUserSpaceInfo *m;
7544 ARMCPRegInfo *r;
7545
7546 for (m = mods; m->name; m++) {
d040242e
AB
7547 GPatternSpec *pat = NULL;
7548 if (m->is_glob) {
7549 pat = g_pattern_spec_new(m->name);
7550 }
6c5c0fec 7551 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
d040242e
AB
7552 if (pat && g_pattern_match_string(pat, r->name)) {
7553 r->type = ARM_CP_CONST;
7554 r->access = PL0U_R;
7555 r->resetvalue = 0;
7556 /* continue */
7557 } else if (strcmp(r->name, m->name) == 0) {
6c5c0fec
AB
7558 r->type = ARM_CP_CONST;
7559 r->access = PL0U_R;
7560 r->resetvalue &= m->exported_bits;
7561 r->resetvalue |= m->fixed_bits;
7562 break;
7563 }
7564 }
d040242e
AB
7565 if (pat) {
7566 g_pattern_spec_free(pat);
7567 }
6c5c0fec
AB
7568 }
7569}
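/*
 * Usage sketch (the register names and mask are invented for
 * illustration):
 *
 *     static ARMCPRegUserSpaceInfo user_mods[] = {
 *         { .name = "SOMEREG_EL1", .exported_bits = 0x00000fff },
 *         { .name = "SOMEID*", .is_glob = true },
 *         { .name = NULL },
 *     };
 *     modify_arm_cp_regs(some_reginfo_list, user_mods);
 *
 * The glob entry turns every matching register into a constant that
 * resets to zero, while the exact-match entry keeps only the exported
 * bits of its reset value visible.
 */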
7570
60322b39 7571const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
4b6a83fb 7572{
60322b39 7573 return g_hash_table_lookup(cpregs, &encoded_cp);
4b6a83fb
PM
7574}
7575
c4241c7d
PM
7576void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
7577 uint64_t value)
4b6a83fb
PM
7578{
7579 /* Helper coprocessor write function for write-ignore registers */
4b6a83fb
PM
7580}
7581
c4241c7d 7582uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
4b6a83fb
PM
7583{
7584 /* Helper coprocessor read function for read-as-zero registers */
4b6a83fb
PM
7585 return 0;
7586}
7587
f5a0a5a5
PM
7588void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
7589{
7590 /* Helper coprocessor reset function for do-nothing-on-reset registers */
7591}
7592
af393ffc 7593static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
37064a8b
PM
7594{
7595 /* Return true if it is not valid for us to switch to
7596 * this CPU mode (i.e. all the UNPREDICTABLE cases in
7597 * the ARM ARM CPSRWriteByInstr pseudocode).
7598 */
af393ffc
PM
7599
7600 /* Changes to or from Hyp via MSR and CPS are illegal. */
7601 if (write_type == CPSRWriteByInstr &&
7602 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
7603 mode == ARM_CPU_MODE_HYP)) {
7604 return 1;
7605 }
7606
37064a8b
PM
7607 switch (mode) {
7608 case ARM_CPU_MODE_USR:
10eacda7 7609 return 0;
37064a8b
PM
7610 case ARM_CPU_MODE_SYS:
7611 case ARM_CPU_MODE_SVC:
7612 case ARM_CPU_MODE_ABT:
7613 case ARM_CPU_MODE_UND:
7614 case ARM_CPU_MODE_IRQ:
7615 case ARM_CPU_MODE_FIQ:
52ff951b
PM
7616 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
7617 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
7618 */
10eacda7
PM
7619 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
7620 * and CPS are treated as illegal mode changes.
7621 */
7622 if (write_type == CPSRWriteByInstr &&
10eacda7 7623 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
7c208e0f 7624 (arm_hcr_el2_eff(env) & HCR_TGE)) {
10eacda7
PM
7625 return 1;
7626 }
37064a8b 7627 return 0;
e6c8fc07
PM
7628 case ARM_CPU_MODE_HYP:
7629 return !arm_feature(env, ARM_FEATURE_EL2)
2d2a4549 7630 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
027fc527 7631 case ARM_CPU_MODE_MON:
58ae2d1f 7632 return arm_current_el(env) < 3;
37064a8b
PM
7633 default:
7634 return 1;
7635 }
7636}
7637
2f4a40e5
AZ
7638uint32_t cpsr_read(CPUARMState *env)
7639{
7640 int ZF;
6fbe23d5
PB
7641 ZF = (env->ZF == 0);
7642 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
2f4a40e5
AZ
7643 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
7644 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
7645 | ((env->condexec_bits & 0xfc) << 8)
af519934 7646 | (env->GE << 16) | (env->daif & CPSR_AIF);
2f4a40e5
AZ
7647}
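/*
 * Example: if env->ZF == 0 (the last result was zero) and env->CF == 1,
 * the returned word has bit 30 (Z) and bit 29 (C) set.
 */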
7648
50866ba5
PM
7649void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
7650 CPSRWriteType write_type)
2f4a40e5 7651{
6e8801f9
FA
7652 uint32_t changed_daif;
7653
2f4a40e5 7654 if (mask & CPSR_NZCV) {
6fbe23d5
PB
7655 env->ZF = (~val) & CPSR_Z;
7656 env->NF = val;
2f4a40e5
AZ
7657 env->CF = (val >> 29) & 1;
7658 env->VF = (val << 3) & 0x80000000;
7659 }
7660 if (mask & CPSR_Q)
7661 env->QF = ((val & CPSR_Q) != 0);
7662 if (mask & CPSR_T)
7663 env->thumb = ((val & CPSR_T) != 0);
7664 if (mask & CPSR_IT_0_1) {
7665 env->condexec_bits &= ~3;
7666 env->condexec_bits |= (val >> 25) & 3;
7667 }
7668 if (mask & CPSR_IT_2_7) {
7669 env->condexec_bits &= 3;
7670 env->condexec_bits |= (val >> 8) & 0xfc;
7671 }
7672 if (mask & CPSR_GE) {
7673 env->GE = (val >> 16) & 0xf;
7674 }
7675
6e8801f9
FA
7676 /* In a V7 implementation that includes the security extensions but does
7677 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
7678 * whether non-secure software is allowed to change the CPSR_F and CPSR_A
7679 * bits respectively.
7680 *
7681 * In a V8 implementation, it is permitted for privileged software to
7682 * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
7683 */
f8c88bbc 7684 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
6e8801f9
FA
7685 arm_feature(env, ARM_FEATURE_EL3) &&
7686 !arm_feature(env, ARM_FEATURE_EL2) &&
7687 !arm_is_secure(env)) {
7688
7689 changed_daif = (env->daif ^ val) & mask;
7690
7691 if (changed_daif & CPSR_A) {
7692 /* Check to see if we are allowed to change the masking of async
7693 * abort exceptions from a non-secure state.
7694 */
7695 if (!(env->cp15.scr_el3 & SCR_AW)) {
7696 qemu_log_mask(LOG_GUEST_ERROR,
7697 "Ignoring attempt to switch CPSR_A flag from "
7698 "non-secure world with SCR.AW bit clear\n");
7699 mask &= ~CPSR_A;
7700 }
7701 }
7702
7703 if (changed_daif & CPSR_F) {
7704 /* Check to see if we are allowed to change the masking of FIQ
7705 * exceptions from a non-secure state.
7706 */
7707 if (!(env->cp15.scr_el3 & SCR_FW)) {
7708 qemu_log_mask(LOG_GUEST_ERROR,
7709 "Ignoring attempt to switch CPSR_F flag from "
7710 "non-secure world with SCR.FW bit clear\n");
7711 mask &= ~CPSR_F;
7712 }
7713
7714 /* Check whether non-maskable FIQ (NMFI) support is enabled.
7715 * If this bit is set software is not allowed to mask
7716 * FIQs, but is allowed to set CPSR_F to 0.
7717 */
7718 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
7719 (val & CPSR_F)) {
7720 qemu_log_mask(LOG_GUEST_ERROR,
7721 "Ignoring attempt to enable CPSR_F flag "
7722 "(non-maskable FIQ [NMFI] support enabled)\n");
7723 mask &= ~CPSR_F;
7724 }
7725 }
7726 }
7727
4cc35614
PM
7728 env->daif &= ~(CPSR_AIF & mask);
7729 env->daif |= val & CPSR_AIF & mask;
7730
f8c88bbc
PM
7731 if (write_type != CPSRWriteRaw &&
7732 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
8c4f0eb9
PM
7733 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
7734 /* Note that we can only get here in USR mode if this is a
7735 * gdb stub write; for this case we follow the architectural
7736 * behaviour for guest writes in USR mode of ignoring an attempt
7737 * to switch mode. (Those are caught by translate.c for writes
7738 * triggered by guest instructions.)
7739 */
7740 mask &= ~CPSR_M;
7741 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
81907a58
PM
7742 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
7743 * v7, and has defined behaviour in v8:
7744 * + leave CPSR.M untouched
7745 * + allow changes to the other CPSR fields
7746 * + set PSTATE.IL
7747 * For user changes via the GDB stub, we don't set PSTATE.IL,
7748 * as this would be unnecessarily harsh for a user error.
37064a8b
PM
7749 */
7750 mask &= ~CPSR_M;
81907a58
PM
7751 if (write_type != CPSRWriteByGDBStub &&
7752 arm_feature(env, ARM_FEATURE_V8)) {
7753 mask |= CPSR_IL;
7754 val |= CPSR_IL;
7755 }
81e37284
PM
7756 qemu_log_mask(LOG_GUEST_ERROR,
7757 "Illegal AArch32 mode switch attempt from %s to %s\n",
7758 aarch32_mode_name(env->uncached_cpsr),
7759 aarch32_mode_name(val));
37064a8b 7760 } else {
81e37284
PM
7761 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
7762 write_type == CPSRWriteExceptionReturn ?
7763 "Exception return from AArch32" :
7764 "AArch32 mode switch from",
7765 aarch32_mode_name(env->uncached_cpsr),
7766 aarch32_mode_name(val), env->regs[15]);
37064a8b
PM
7767 switch_mode(env, val & CPSR_M);
7768 }
2f4a40e5
AZ
7769 }
7770 mask &= ~CACHED_CPSR_BITS;
7771 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
7772}
7773
b26eefb6
PB
7774/* Sign/zero extend */
7775uint32_t HELPER(sxtb16)(uint32_t x)
7776{
7777 uint32_t res;
7778 res = (uint16_t)(int8_t)x;
7779 res |= (uint32_t)(int8_t)(x >> 16) << 16;
7780 return res;
7781}
7782
7783uint32_t HELPER(uxtb16)(uint32_t x)
7784{
7785 uint32_t res;
7786 res = (uint16_t)(uint8_t)x;
7787 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
7788 return res;
7789}
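/*
 * Example: sxtb16(0x00800080) == 0xff80ff80 (each selected byte is
 * sign-extended to 16 bits), while uxtb16(0x00800080) == 0x00800080
 * (zero-extension leaves the bytes unchanged).
 */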
7790
3670669c
PB
7791int32_t HELPER(sdiv)(int32_t num, int32_t den)
7792{
7793 if (den == 0)
7794 return 0;
686eeb93
AJ
7795 if (num == INT_MIN && den == -1)
7796 return INT_MIN;
3670669c
PB
7797 return num / den;
7798}
7799
7800uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
7801{
7802 if (den == 0)
7803 return 0;
7804 return num / den;
7805}
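/*
 * These follow the Arm SDIV/UDIV rules rather than C semantics:
 * division by zero yields 0, and the signed overflow case returns
 * INT_MIN, so sdiv(INT_MIN, -1) == INT_MIN and udiv(7, 0) == 0.
 */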
7806
7807uint32_t HELPER(rbit)(uint32_t x)
7808{
42fedbca 7809 return revbit32(x);
3670669c
PB
7810}
7811
c47eaf9f 7812#ifdef CONFIG_USER_ONLY
b5ff1b31 7813
affdb64d 7814static void switch_mode(CPUARMState *env, int mode)
b5ff1b31 7815{
2fc0cc0e 7816 ARMCPU *cpu = env_archcpu(env);
a47dddd7
AF
7817
7818 if (mode != ARM_CPU_MODE_USR) {
7819 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
7820 }
b5ff1b31
FB
7821}
7822
012a906b
GB
7823uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7824 uint32_t cur_el, bool secure)
9e729b57
EI
7825{
7826 return 1;
7827}
7828
ce02049d
GB
7829void aarch64_sync_64_to_32(CPUARMState *env)
7830{
7831 g_assert_not_reached();
7832}
7833
b5ff1b31
FB
7834#else
7835
affdb64d 7836static void switch_mode(CPUARMState *env, int mode)
b5ff1b31
FB
7837{
7838 int old_mode;
7839 int i;
7840
7841 old_mode = env->uncached_cpsr & CPSR_M;
7842 if (mode == old_mode)
7843 return;
7844
7845 if (old_mode == ARM_CPU_MODE_FIQ) {
7846 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 7847 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
7848 } else if (mode == ARM_CPU_MODE_FIQ) {
7849 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 7850 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
7851 }
7852
f5206413 7853 i = bank_number(old_mode);
b5ff1b31 7854 env->banked_r13[i] = env->regs[13];
b5ff1b31
FB
7855 env->banked_spsr[i] = env->spsr;
7856
f5206413 7857 i = bank_number(mode);
b5ff1b31 7858 env->regs[13] = env->banked_r13[i];
b5ff1b31 7859 env->spsr = env->banked_spsr[i];
593cfa2b
PM
7860
7861 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
7862 env->regs[14] = env->banked_r14[r14_bank_number(mode)];
b5ff1b31
FB
7863}
7864
0eeb17d6
GB
7865/* Physical Interrupt Target EL Lookup Table
7866 *
7867 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
7868 *
7869 * The below multi-dimensional table is used for looking up the target
7870 * exception level given numerous condition criteria. Specifically, the
7871 * target EL is based on SCR and HCR routing controls as well as the
7872 * currently executing EL and secure state.
7873 *
7874 * Dimensions:
7875 * target_el_table[2][2][2][2][2][4]
7876 * | | | | | +--- Current EL
7877 * | | | | +------ Non-secure(0)/Secure(1)
7878 * | | | +--------- HCR mask override
7879 * | | +------------ SCR exec state control
7880 * | +--------------- SCR mask override
7881 * +------------------ 32-bit(0)/64-bit(1) EL3
7882 *
7883 * The table values are as such:
7884 * 0-3 = EL0-EL3
7885 * -1 = Cannot occur
7886 *
7887 * The ARM ARM target EL table includes entries indicating that an "exception
7888 * is not taken". The two cases where this is applicable are:
7889 * 1) An exception is taken from EL3 but the SCR does not have the exception
7890 * routed to EL3.
7891 * 2) An exception is taken from EL2 but the HCR does not have the exception
7892 * routed to EL2.
7893 * In these two cases, the table below contains a target of EL1. This value is
7894 * returned as it is expected that the consumer of the table data will check
7895 * for "target EL >= current EL" to ensure the exception is not taken.
7896 *
7897 * SCR HCR
7898 * 64 EA AMO From
7899 * BIT IRQ IMO Non-secure Secure
7900 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
7901 */
82c39f6a 7902static const int8_t target_el_table[2][2][2][2][2][4] = {
0eeb17d6
GB
7903 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
7904 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
7905 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
7906 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
7907 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
7908 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
7909 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
7910 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
7911 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
7912 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
7913 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
7914 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
7915 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
7916 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
7917 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
7918 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
7919};
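/*
 * Worked example: a non-secure EL0 IRQ with a 64-bit EL3, SCR_EL3.IRQ
 * clear, SCR_EL3.RW set and HCR_EL2.IMO set indexes
 * target_el_table[1][0][1][1][0][0] == 2, i.e. the IRQ is routed to EL2.
 */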
7920
7921/*
7922 * Determine the target EL for physical exceptions
7923 */
012a906b
GB
7924uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7925 uint32_t cur_el, bool secure)
0eeb17d6
GB
7926{
7927 CPUARMState *env = cs->env_ptr;
f7778444
RH
7928 bool rw;
7929 bool scr;
7930 bool hcr;
0eeb17d6 7931 int target_el;
2cde031f 7932 /* Is the highest EL AArch64? */
f7778444
RH
7933 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
7934 uint64_t hcr_el2;
2cde031f
SS
7935
7936 if (arm_feature(env, ARM_FEATURE_EL3)) {
7937 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
7938 } else {
7939 /* Either EL2 is the highest EL (and so the EL2 register width
7940 * is given by is64); or there is no EL2 or EL3, in which case
7941 * the value of 'rw' does not affect the table lookup anyway.
7942 */
7943 rw = is64;
7944 }
0eeb17d6 7945
f7778444 7946 hcr_el2 = arm_hcr_el2_eff(env);
0eeb17d6
GB
7947 switch (excp_idx) {
7948 case EXCP_IRQ:
7949 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
f7778444 7950 hcr = hcr_el2 & HCR_IMO;
0eeb17d6
GB
7951 break;
7952 case EXCP_FIQ:
7953 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
f7778444 7954 hcr = hcr_el2 & HCR_FMO;
0eeb17d6
GB
7955 break;
7956 default:
7957 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
f7778444 7958 hcr = hcr_el2 & HCR_AMO;
0eeb17d6
GB
7959 break;
7960 };
7961
0eeb17d6
GB
7962 /* Perform a table-lookup for the target EL given the current state */
7963 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
7964
7965 assert(target_el > 0);
7966
7967 return target_el;
7968}
7969
b59f479b
PMD
7970void arm_log_exception(int idx)
7971{
7972 if (qemu_loglevel_mask(CPU_LOG_INT)) {
7973 const char *exc = NULL;
7974 static const char * const excnames[] = {
7975 [EXCP_UDEF] = "Undefined Instruction",
7976 [EXCP_SWI] = "SVC",
7977 [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
7978 [EXCP_DATA_ABORT] = "Data Abort",
7979 [EXCP_IRQ] = "IRQ",
7980 [EXCP_FIQ] = "FIQ",
7981 [EXCP_BKPT] = "Breakpoint",
7982 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
7983 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
7984 [EXCP_HVC] = "Hypervisor Call",
7985 [EXCP_HYP_TRAP] = "Hypervisor Trap",
7986 [EXCP_SMC] = "Secure Monitor Call",
7987 [EXCP_VIRQ] = "Virtual IRQ",
7988 [EXCP_VFIQ] = "Virtual FIQ",
7989 [EXCP_SEMIHOST] = "Semihosting call",
7990 [EXCP_NOCP] = "v7M NOCP UsageFault",
7991 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
7992 [EXCP_STKOF] = "v8M STKOF UsageFault",
7993 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
7994 [EXCP_LSERR] = "v8M LSERR UsageFault",
7995 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
7996 };
7997
7998 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
7999 exc = excnames[idx];
8000 }
8001 if (!exc) {
8002 exc = "unknown";
8003 }
8004 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
8005 }
8006}
8007
a356dacf 8008/*
7aab5a8c
PMD
8009 * Function used to synchronize QEMU's AArch64 register set with the AArch32
8010 * register set. This is necessary when switching between AArch32 and AArch64
8011 * execution state.
a356dacf 8012 */
7aab5a8c 8013void aarch64_sync_32_to_64(CPUARMState *env)
9ee6e8bb 8014{
7aab5a8c
PMD
8015 int i;
8016 uint32_t mode = env->uncached_cpsr & CPSR_M;
8017
8018 /* We can blanket copy R[0:7] to X[0:7] */
8019 for (i = 0; i < 8; i++) {
8020 env->xregs[i] = env->regs[i];
fd592d89 8021 }
70d74660 8022
9a223097 8023 /*
7aab5a8c
PMD
8024 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
8025 * Otherwise, they come from the banked user regs.
fd592d89 8026 */
7aab5a8c
PMD
8027 if (mode == ARM_CPU_MODE_FIQ) {
8028 for (i = 8; i < 13; i++) {
8029 env->xregs[i] = env->usr_regs[i - 8];
8030 }
8031 } else {
8032 for (i = 8; i < 13; i++) {
8033 env->xregs[i] = env->regs[i];
8034 }
fd592d89 8035 }
9ee6e8bb 8036
7aab5a8c
PMD
8037 /*
8038 * Registers x13-x23 are the various mode SP and LR registers. Registers
8039 * r13 and r14 are only copied if we are in that mode, otherwise we copy
8040 * from the mode banked register.
8041 */
8042 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8043 env->xregs[13] = env->regs[13];
8044 env->xregs[14] = env->regs[14];
8045 } else {
8046 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
8047 /* HYP is an exception in that it is copied from r14 */
8048 if (mode == ARM_CPU_MODE_HYP) {
8049 env->xregs[14] = env->regs[14];
95695eff 8050 } else {
7aab5a8c 8051 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
95695eff 8052 }
95695eff
PM
8053 }
8054
7aab5a8c
PMD
8055 if (mode == ARM_CPU_MODE_HYP) {
8056 env->xregs[15] = env->regs[13];
8057 } else {
8058 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
95695eff
PM
8059 }
8060
7aab5a8c
PMD
8061 if (mode == ARM_CPU_MODE_IRQ) {
8062 env->xregs[16] = env->regs[14];
8063 env->xregs[17] = env->regs[13];
8064 } else {
8065 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
8066 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
8067 }
95695eff 8068
7aab5a8c
PMD
8069 if (mode == ARM_CPU_MODE_SVC) {
8070 env->xregs[18] = env->regs[14];
8071 env->xregs[19] = env->regs[13];
8072 } else {
8073 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
8074 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
8075 }
95695eff 8076
7aab5a8c
PMD
8077 if (mode == ARM_CPU_MODE_ABT) {
8078 env->xregs[20] = env->regs[14];
8079 env->xregs[21] = env->regs[13];
8080 } else {
8081 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
8082 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
8083 }
e33cf0f8 8084
7aab5a8c
PMD
8085 if (mode == ARM_CPU_MODE_UND) {
8086 env->xregs[22] = env->regs[14];
8087 env->xregs[23] = env->regs[13];
8088 } else {
8089 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
8090 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
e33cf0f8
PM
8091 }
8092
8093 /*
7aab5a8c
PMD
8094 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8095 * mode, then we can copy from r8-r14. Otherwise, we copy from the
8096 * FIQ bank for r8-r14.
e33cf0f8 8097 */
7aab5a8c
PMD
8098 if (mode == ARM_CPU_MODE_FIQ) {
8099 for (i = 24; i < 31; i++) {
8100 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
8101 }
8102 } else {
8103 for (i = 24; i < 29; i++) {
8104 env->xregs[i] = env->fiq_regs[i - 24];
e33cf0f8 8105 }
7aab5a8c
PMD
8106 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
8107 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
e33cf0f8 8108 }
7aab5a8c
PMD
8109
8110 env->pc = env->regs[15];
e33cf0f8
PM
8111}
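/*
 * Summary of the fixed mapping established above (and inverted by
 * aarch64_sync_64_to_32() below): x13/x14 <- USR SP/LR, x15 <- HYP SP,
 * x16/x17 <- IRQ LR/SP, x18/x19 <- SVC LR/SP, x20/x21 <- ABT LR/SP,
 * x22/x23 <- UND LR/SP, x24-x30 <- FIQ r8-r14.
 */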
8112
9a223097 8113/*
7aab5a8c
PMD
8114 * Function used to synchronize QEMU's AArch32 register set with the AArch64
8115 * register set. This is necessary when switching between AArch32 and AArch64
8116 * execution state.
de2db7ec 8117 */
7aab5a8c 8118void aarch64_sync_64_to_32(CPUARMState *env)
9ee6e8bb 8119{
7aab5a8c
PMD
8120 int i;
8121 uint32_t mode = env->uncached_cpsr & CPSR_M;
abc24d86 8122
7aab5a8c
PMD
8123 /* We can blanket copy X[0:7] to R[0:7] */
8124 for (i = 0; i < 8; i++) {
8125 env->regs[i] = env->xregs[i];
de2db7ec 8126 }
3f0cddee 8127
9a223097 8128 /*
7aab5a8c
PMD
8129 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
8130 * Otherwise, we copy x8-x12 into the banked user regs.
de2db7ec 8131 */
7aab5a8c
PMD
8132 if (mode == ARM_CPU_MODE_FIQ) {
8133 for (i = 8; i < 13; i++) {
8134 env->usr_regs[i - 8] = env->xregs[i];
8135 }
8136 } else {
8137 for (i = 8; i < 13; i++) {
8138 env->regs[i] = env->xregs[i];
8139 }
fb602cb7
PM
8140 }
8141
9a223097 8142 /*
7aab5a8c
PMD
8143 * Registers r13 & r14 depend on the current mode.
8144 * If we are in a given mode, we copy the corresponding x registers to r13
8145 * and r14. Otherwise, we copy the x register to the banked r13 and r14
8146 * for the mode.
fb602cb7 8147 */
7aab5a8c
PMD
8148 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8149 env->regs[13] = env->xregs[13];
8150 env->regs[14] = env->xregs[14];
fb602cb7 8151 } else {
7aab5a8c 8152 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
fb602cb7 8153
7aab5a8c
PMD
8154 /*
8155 * HYP is an exception in that it does not have its own banked r14 but
8156 * shares the USR r14
8157 */
8158 if (mode == ARM_CPU_MODE_HYP) {
8159 env->regs[14] = env->xregs[14];
8160 } else {
8161 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
8162 }
8163 }
fb602cb7 8164
7aab5a8c
PMD
8165 if (mode == ARM_CPU_MODE_HYP) {
8166 env->regs[13] = env->xregs[15];
fb602cb7 8167 } else {
7aab5a8c 8168 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
fb602cb7 8169 }
d02a8698 8170
7aab5a8c
PMD
8171 if (mode == ARM_CPU_MODE_IRQ) {
8172 env->regs[14] = env->xregs[16];
8173 env->regs[13] = env->xregs[17];
d02a8698 8174 } else {
7aab5a8c
PMD
8175 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
8176 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
d02a8698
PM
8177 }
8178
7aab5a8c
PMD
8179 if (mode == ARM_CPU_MODE_SVC) {
8180 env->regs[14] = env->xregs[18];
8181 env->regs[13] = env->xregs[19];
8182 } else {
8183 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
8184 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
fb602cb7
PM
8185 }
8186
7aab5a8c
PMD
8187 if (mode == ARM_CPU_MODE_ABT) {
8188 env->regs[14] = env->xregs[20];
8189 env->regs[13] = env->xregs[21];
8190 } else {
8191 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
8192 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
ce02049d
GB
8193 }
8194
8195 if (mode == ARM_CPU_MODE_UND) {
3a9148d0
SS
8196 env->regs[14] = env->xregs[22];
8197 env->regs[13] = env->xregs[23];
ce02049d 8198 } else {
593cfa2b 8199 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
3a9148d0 8200 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
ce02049d
GB
8201 }
8202
8203 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
8204 * mode, then we can copy to r8-r14. Otherwise, we copy to the
8205 * FIQ bank for r8-r14.
8206 */
8207 if (mode == ARM_CPU_MODE_FIQ) {
8208 for (i = 24; i < 31; i++) {
8209 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
8210 }
8211 } else {
8212 for (i = 24; i < 29; i++) {
8213 env->fiq_regs[i - 24] = env->xregs[i];
8214 }
8215 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
593cfa2b 8216 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
ce02049d
GB
8217 }
8218
8219 env->regs[15] = env->pc;
8220}
8221
dea8378b
PM
8222static void take_aarch32_exception(CPUARMState *env, int new_mode,
8223 uint32_t mask, uint32_t offset,
8224 uint32_t newpc)
8225{
8226 /* Change the CPU state so as to actually take the exception. */
8227 switch_mode(env, new_mode);
8228 /*
8229 * For exceptions taken to AArch32 we must clear the SS bit in both
8230 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
8231 */
8232 env->uncached_cpsr &= ~PSTATE_SS;
8233 env->spsr = cpsr_read(env);
8234 /* Clear IT bits. */
8235 env->condexec_bits = 0;
8236 /* Switch to the new mode, and to the correct instruction set. */
8237 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8238 /* Set new mode endianness */
8239 env->uncached_cpsr &= ~CPSR_E;
8240 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
8241 env->uncached_cpsr |= CPSR_E;
8242 }
829f9fd3
PM
8243 /* J and IL must always be cleared for exception entry */
8244 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
dea8378b
PM
8245 env->daif |= mask;
8246
8247 if (new_mode == ARM_CPU_MODE_HYP) {
8248 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8249 env->elr_el[2] = env->regs[15];
8250 } else {
8251 /*
8252 * This is a lie, as there was no c1_sys on v4T/v5, but it does no
8253 * harm; we just guard the Thumb-mode update on v4T.
8254 */
8255 if (arm_feature(env, ARM_FEATURE_V4T)) {
8256 env->thumb =
8257 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8258 }
8259 env->regs[14] = env->regs[15] + offset;
8260 }
8261 env->regs[15] = newpc;
a8a79c7a 8262 arm_rebuild_hflags(env);
dea8378b
PM
8263}
8264
b9bc21ff
PM
8265static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8266{
8267 /*
8268 * Handle exception entry to Hyp mode; this is sufficiently
8269 * different to entry to other AArch32 modes that we handle it
8270 * separately here.
8271 *
8272 * The vector table entry used is always the 0x14 Hyp mode entry point,
8273 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
8274 * The offset applied to the preferred return address is always zero
8275 * (see DDI0487C.a section G1.12.3).
8276 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8277 */
8278 uint32_t addr, mask;
8279 ARMCPU *cpu = ARM_CPU(cs);
8280 CPUARMState *env = &cpu->env;
8281
8282 switch (cs->exception_index) {
8283 case EXCP_UDEF:
8284 addr = 0x04;
8285 break;
8286 case EXCP_SWI:
8287 addr = 0x14;
8288 break;
8289 case EXCP_BKPT:
8290 /* Fall through to prefetch abort. */
8291 case EXCP_PREFETCH_ABORT:
8292 env->cp15.ifar_s = env->exception.vaddress;
8293 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8294 (uint32_t)env->exception.vaddress);
8295 addr = 0x0c;
8296 break;
8297 case EXCP_DATA_ABORT:
8298 env->cp15.dfar_s = env->exception.vaddress;
8299 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8300 (uint32_t)env->exception.vaddress);
8301 addr = 0x10;
8302 break;
8303 case EXCP_IRQ:
8304 addr = 0x18;
8305 break;
8306 case EXCP_FIQ:
8307 addr = 0x1c;
8308 break;
8309 case EXCP_HVC:
8310 addr = 0x08;
8311 break;
8312 case EXCP_HYP_TRAP:
8313 addr = 0x14;
9bbb4ef9 8314 break;
b9bc21ff
PM
8315 default:
8316 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8317 }
8318
8319 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
2ed08180
PM
8320 if (!arm_feature(env, ARM_FEATURE_V8)) {
8321 /*
8322 * QEMU syndrome values are v8-style. v7 has the IL bit
8323 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
8324 * If this is a v7 CPU, squash the IL bit in those cases.
8325 */
8326 if (cs->exception_index == EXCP_PREFETCH_ABORT ||
8327 (cs->exception_index == EXCP_DATA_ABORT &&
8328 !(env->exception.syndrome & ARM_EL_ISV)) ||
8329 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
8330 env->exception.syndrome &= ~ARM_EL_IL;
8331 }
8332 }
b9bc21ff
PM
8333 env->cp15.esr_el[2] = env->exception.syndrome;
8334 }
8335
8336 if (arm_current_el(env) != 2 && addr < 0x14) {
8337 addr = 0x14;
8338 }
8339
8340 mask = 0;
8341 if (!(env->cp15.scr_el3 & SCR_EA)) {
8342 mask |= CPSR_A;
8343 }
8344 if (!(env->cp15.scr_el3 & SCR_IRQ)) {
8345 mask |= CPSR_I;
8346 }
8347 if (!(env->cp15.scr_el3 & SCR_FIQ)) {
8348 mask |= CPSR_F;
8349 }
8350
8351 addr += env->cp15.hvbar;
8352
8353 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
8354}
8355
966f758c 8356static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
b5ff1b31 8357{
97a8ea5a
AF
8358 ARMCPU *cpu = ARM_CPU(cs);
8359 CPUARMState *env = &cpu->env;
b5ff1b31
FB
8360 uint32_t addr;
8361 uint32_t mask;
8362 int new_mode;
8363 uint32_t offset;
16a906fd 8364 uint32_t moe;
b5ff1b31 8365
16a906fd 8366 /* If this is a debug exception we must update the DBGDSCR.MOE bits */
64b91e3f 8367 switch (syn_get_ec(env->exception.syndrome)) {
16a906fd
PM
8368 case EC_BREAKPOINT:
8369 case EC_BREAKPOINT_SAME_EL:
8370 moe = 1;
8371 break;
8372 case EC_WATCHPOINT:
8373 case EC_WATCHPOINT_SAME_EL:
8374 moe = 10;
8375 break;
8376 case EC_AA32_BKPT:
8377 moe = 3;
8378 break;
8379 case EC_VECTORCATCH:
8380 moe = 5;
8381 break;
8382 default:
8383 moe = 0;
8384 break;
8385 }
8386
8387 if (moe) {
8388 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
8389 }
8390
b9bc21ff
PM
8391 if (env->exception.target_el == 2) {
8392 arm_cpu_do_interrupt_aarch32_hyp(cs);
8393 return;
8394 }
8395
27103424 8396 switch (cs->exception_index) {
b5ff1b31
FB
8397 case EXCP_UDEF:
8398 new_mode = ARM_CPU_MODE_UND;
8399 addr = 0x04;
8400 mask = CPSR_I;
8401 if (env->thumb)
8402 offset = 2;
8403 else
8404 offset = 4;
8405 break;
8406 case EXCP_SWI:
8407 new_mode = ARM_CPU_MODE_SVC;
8408 addr = 0x08;
8409 mask = CPSR_I;
601d70b9 8410 /* The PC already points to the next instruction. */
b5ff1b31
FB
8411 offset = 0;
8412 break;
06c949e6 8413 case EXCP_BKPT:
9ee6e8bb
PB
8414 /* Fall through to prefetch abort. */
8415 case EXCP_PREFETCH_ABORT:
88ca1c2d 8416 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
b848ce2b 8417 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
3f1beaca 8418 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
88ca1c2d 8419 env->exception.fsr, (uint32_t)env->exception.vaddress);
b5ff1b31
FB
8420 new_mode = ARM_CPU_MODE_ABT;
8421 addr = 0x0c;
8422 mask = CPSR_A | CPSR_I;
8423 offset = 4;
8424 break;
8425 case EXCP_DATA_ABORT:
4a7e2d73 8426 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
b848ce2b 8427 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
3f1beaca 8428 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
4a7e2d73 8429 env->exception.fsr,
6cd8a264 8430 (uint32_t)env->exception.vaddress);
b5ff1b31
FB
8431 new_mode = ARM_CPU_MODE_ABT;
8432 addr = 0x10;
8433 mask = CPSR_A | CPSR_I;
8434 offset = 8;
8435 break;
8436 case EXCP_IRQ:
8437 new_mode = ARM_CPU_MODE_IRQ;
8438 addr = 0x18;
8439 /* Disable IRQ and imprecise data aborts. */
8440 mask = CPSR_A | CPSR_I;
8441 offset = 4;
de38d23b
FA
8442 if (env->cp15.scr_el3 & SCR_IRQ) {
8443 /* IRQ routed to monitor mode */
8444 new_mode = ARM_CPU_MODE_MON;
8445 mask |= CPSR_F;
8446 }
b5ff1b31
FB
8447 break;
8448 case EXCP_FIQ:
8449 new_mode = ARM_CPU_MODE_FIQ;
8450 addr = 0x1c;
8451 /* Disable FIQ, IRQ and imprecise data aborts. */
8452 mask = CPSR_A | CPSR_I | CPSR_F;
de38d23b
FA
8453 if (env->cp15.scr_el3 & SCR_FIQ) {
8454 /* FIQ routed to monitor mode */
8455 new_mode = ARM_CPU_MODE_MON;
8456 }
b5ff1b31
FB
8457 offset = 4;
8458 break;
87a4b270
PM
8459 case EXCP_VIRQ:
8460 new_mode = ARM_CPU_MODE_IRQ;
8461 addr = 0x18;
8462 /* Disable IRQ and imprecise data aborts. */
8463 mask = CPSR_A | CPSR_I;
8464 offset = 4;
8465 break;
8466 case EXCP_VFIQ:
8467 new_mode = ARM_CPU_MODE_FIQ;
8468 addr = 0x1c;
8469 /* Disable FIQ, IRQ and imprecise data aborts. */
8470 mask = CPSR_A | CPSR_I | CPSR_F;
8471 offset = 4;
8472 break;
dbe9d163
FA
8473 case EXCP_SMC:
8474 new_mode = ARM_CPU_MODE_MON;
8475 addr = 0x08;
8476 mask = CPSR_A | CPSR_I | CPSR_F;
8477 offset = 0;
8478 break;
b5ff1b31 8479 default:
a47dddd7 8480 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
b5ff1b31
FB
8481 return; /* Never happens. Keep compiler happy. */
8482 }
e89e51a1
FA
8483
8484 if (new_mode == ARM_CPU_MODE_MON) {
8485 addr += env->cp15.mvbar;
137feaa9 8486 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
e89e51a1 8487 /* High vectors. When enabled, base address cannot be remapped. */
b5ff1b31 8488 addr += 0xffff0000;
8641136c
NR
8489 } else {
8490 /* ARM v7 architectures provide a vector base address register to remap
8491 * the interrupt vector table.
e89e51a1 8492 * This register is only honoured in non-monitor mode, and is banked.
8641136c
NR
8493 * Note: only bits 31:5 are valid.
8494 */
fb6c91ba 8495 addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
b5ff1b31 8496 }
dbe9d163
FA
8497
8498 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
8499 env->cp15.scr_el3 &= ~SCR_NS;
8500 }
8501
dea8378b 8502 take_aarch32_exception(env, new_mode, mask, offset, addr);
b5ff1b31
FB
8503}
8504
966f758c
PM
8505/* Handle exception entry to a target EL which is using AArch64 */
8506static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
f3a9b694
PM
8507{
8508 ARMCPU *cpu = ARM_CPU(cs);
8509 CPUARMState *env = &cpu->env;
8510 unsigned int new_el = env->exception.target_el;
8511 target_ulong addr = env->cp15.vbar_el[new_el];
8512 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
0ab5953b
RH
8513 unsigned int cur_el = arm_current_el(env);
8514
9a05f7b6
RH
8515 /*
8516 * Note that new_el can never be 0. If cur_el is 0, then
8517 * el0_a64 is is_a64(), else el0_a64 is ignored.
8518 */
8519 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
f3a9b694 8520
0ab5953b 8521 if (cur_el < new_el) {
3d6f7617
PM
8522 /* Entry vector offset depends on whether the implemented EL
8523 * immediately lower than the target level is using AArch32 or AArch64
8524 */
8525 bool is_aa64;
8526
8527 switch (new_el) {
8528 case 3:
8529 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
8530 break;
8531 case 2:
8532 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
8533 break;
8534 case 1:
8535 is_aa64 = is_a64(env);
8536 break;
8537 default:
8538 g_assert_not_reached();
8539 }
8540
8541 if (is_aa64) {
f3a9b694
PM
8542 addr += 0x400;
8543 } else {
8544 addr += 0x600;
8545 }
8546 } else if (pstate_read(env) & PSTATE_SP) {
8547 addr += 0x200;
8548 }
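    /*
     * At this point addr is the vector table base plus the origin
     * offset: 0x000 for same-EL-with-SP_EL0, 0x200 for same-EL-with-
     * SP_ELx, 0x400 for a lower EL using AArch64, 0x600 for a lower EL
     * using AArch32. The switch below adds the 0x80/0x100 slot offsets
     * for IRQ/FIQ entries.
     */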
8549
f3a9b694
PM
8550 switch (cs->exception_index) {
8551 case EXCP_PREFETCH_ABORT:
8552 case EXCP_DATA_ABORT:
8553 env->cp15.far_el[new_el] = env->exception.vaddress;
8554 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
8555 env->cp15.far_el[new_el]);
8556 /* fall through */
8557 case EXCP_BKPT:
8558 case EXCP_UDEF:
8559 case EXCP_SWI:
8560 case EXCP_HVC:
8561 case EXCP_HYP_TRAP:
8562 case EXCP_SMC:
4be42f40
PM
8563 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
8564 /*
8565 * QEMU internal FP/SIMD syndromes from AArch32 include the
8566 * TA and coproc fields which are only exposed if the exception
8567 * is taken to AArch32 Hyp mode. Mask them out to get a valid
8568 * AArch64 format syndrome.
8569 */
8570 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
8571 }
f3a9b694
PM
8572 env->cp15.esr_el[new_el] = env->exception.syndrome;
8573 break;
8574 case EXCP_IRQ:
8575 case EXCP_VIRQ:
8576 addr += 0x80;
8577 break;
8578 case EXCP_FIQ:
8579 case EXCP_VFIQ:
8580 addr += 0x100;
8581 break;
f3a9b694
PM
8582 default:
8583 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8584 }
8585
8586 if (is_a64(env)) {
8587 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
8588 aarch64_save_sp(env, arm_current_el(env));
8589 env->elr_el[new_el] = env->pc;
8590 } else {
8591 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
f3a9b694
PM
8592 env->elr_el[new_el] = env->regs[15];
8593
8594 aarch64_sync_32_to_64(env);
8595
8596 env->condexec_bits = 0;
8597 }
8598 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
8599 env->elr_el[new_el]);
8600
8601 pstate_write(env, PSTATE_DAIF | new_mode);
8602 env->aarch64 = 1;
8603 aarch64_restore_sp(env, new_el);
a8a79c7a 8604 helper_rebuild_hflags_a64(env, new_el);
f3a9b694
PM
8605
8606 env->pc = addr;
8607
8608 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
8609 new_el, env->pc, pstate_read(env));
966f758c
PM
8610}
8611
ed6e6ba9
AB
8612/*
8613 * Do semihosting call and set the appropriate return value. All the
8614 * permission and validity checks have been done at translate time.
8615 *
8616 * We only see semihosting exceptions in TCG only as they are not
8617 * trapped to the hypervisor in KVM.
8618 */
91f78c58 8619#ifdef CONFIG_TCG
ed6e6ba9
AB
8620static void handle_semihosting(CPUState *cs)
8621{
904c04de
PM
8622 ARMCPU *cpu = ARM_CPU(cs);
8623 CPUARMState *env = &cpu->env;
8624
8625 if (is_a64(env)) {
ed6e6ba9
AB
8626 qemu_log_mask(CPU_LOG_INT,
8627 "...handling as semihosting call 0x%" PRIx64 "\n",
8628 env->xregs[0]);
8629 env->xregs[0] = do_arm_semihosting(env);
4ff5ef9e 8630 env->pc += 4;
904c04de 8631 } else {
904c04de
PM
8632 qemu_log_mask(CPU_LOG_INT,
8633 "...handling as semihosting call 0x%x\n",
8634 env->regs[0]);
8635 env->regs[0] = do_arm_semihosting(env);
4ff5ef9e 8636 env->regs[15] += env->thumb ? 2 : 4;
904c04de
PM
8637 }
8638}
ed6e6ba9 8639#endif
904c04de 8640
966f758c
PM
8641/* Handle a CPU exception for A and R profile CPUs.
8642 * Do any appropriate logging, handle PSCI calls, and then hand off
8643 * to the AArch64-entry or AArch32-entry function depending on the
8644 * target exception level's register width.
8645 */
8646void arm_cpu_do_interrupt(CPUState *cs)
8647{
8648 ARMCPU *cpu = ARM_CPU(cs);
8649 CPUARMState *env = &cpu->env;
8650 unsigned int new_el = env->exception.target_el;
8651
531c60a9 8652 assert(!arm_feature(env, ARM_FEATURE_M));
966f758c
PM
8653
8654 arm_log_exception(cs->exception_index);
8655 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
8656 new_el);
8657 if (qemu_loglevel_mask(CPU_LOG_INT)
8658 && !excp_is_internal(cs->exception_index)) {
6568da45 8659 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
64b91e3f 8660 syn_get_ec(env->exception.syndrome),
966f758c
PM
8661 env->exception.syndrome);
8662 }
8663
8664 if (arm_is_psci_call(cpu, cs->exception_index)) {
8665 arm_handle_psci_call(cpu);
8666 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
8667 return;
8668 }
8669
ed6e6ba9
AB
8670 /*
8671 * Semihosting semantics depend on the register width of the code
8672 * that caused the exception, not the target exception level, so
8673 * must be handled here.
966f758c 8674 */
ed6e6ba9
AB
8675#ifdef CONFIG_TCG
8676 if (cs->exception_index == EXCP_SEMIHOST) {
8677 handle_semihosting(cs);
904c04de
PM
8678 return;
8679 }
ed6e6ba9 8680#endif
904c04de 8681
b5c53d1b
AL
8682 /* Hooks may change global state, so the BQL should be held; the
8683 * BQL is also needed for any modification of
8684 * cs->interrupt_request.
8685 */
8686 g_assert(qemu_mutex_iothread_locked());
8687
8688 arm_call_pre_el_change_hook(cpu);
8689
904c04de
PM
8690 assert(!excp_is_internal(cs->exception_index));
8691 if (arm_el_is_aa64(env, new_el)) {
966f758c
PM
8692 arm_cpu_do_interrupt_aarch64(cs);
8693 } else {
8694 arm_cpu_do_interrupt_aarch32(cs);
8695 }
f3a9b694 8696
bd7d00fc
PM
8697 arm_call_el_change_hook(cpu);
8698
f3a9b694
PM
8699 if (!kvm_enabled()) {
8700 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
8701 }
8702}
c47eaf9f 8703#endif /* !CONFIG_USER_ONLY */
0480f69a
PM
8704
8705/* Return the exception level which controls this address translation regime */
8706static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
8707{
8708 switch (mmu_idx) {
8709 case ARMMMUIdx_S2NS:
8710 case ARMMMUIdx_S1E2:
8711 return 2;
8712 case ARMMMUIdx_S1E3:
8713 return 3;
8714 case ARMMMUIdx_S1SE0:
8715 return arm_el_is_aa64(env, 3) ? 1 : 3;
8716 case ARMMMUIdx_S1SE1:
8717 case ARMMMUIdx_S1NSE0:
8718 case ARMMMUIdx_S1NSE1:
62593718
PM
8719 case ARMMMUIdx_MPrivNegPri:
8720 case ARMMMUIdx_MUserNegPri:
e7b921c2
PM
8721 case ARMMMUIdx_MPriv:
8722 case ARMMMUIdx_MUser:
62593718
PM
8723 case ARMMMUIdx_MSPrivNegPri:
8724 case ARMMMUIdx_MSUserNegPri:
66787c78 8725 case ARMMMUIdx_MSPriv:
66787c78 8726 case ARMMMUIdx_MSUser:
0480f69a
PM
8727 return 1;
8728 default:
8729 g_assert_not_reached();
8730 }
8731}
8732
c47eaf9f
PM
8733#ifndef CONFIG_USER_ONLY
8734
0480f69a
PM
8735/* Return the SCTLR value which controls this address translation regime */
8736static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
8737{
8738 return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
8739}
8740
8741/* Return true if the specified stage of address translation is disabled */
8742static inline bool regime_translation_disabled(CPUARMState *env,
8743 ARMMMUIdx mmu_idx)
8744{
29c483a5 8745 if (arm_feature(env, ARM_FEATURE_M)) {
ecf5e8ea 8746 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
3bef7012
PM
8747 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
8748 case R_V7M_MPU_CTRL_ENABLE_MASK:
8749 /* Enabled, but not for HardFault and NMI */
62593718 8750 return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
3bef7012
PM
8751 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
8752 /* Enabled for all cases */
8753 return false;
8754 case 0:
8755 default:
8756 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
8757 * we warned about that in armv7m_nvic.c when the guest set it.
8758 */
8759 return true;
8760 }
29c483a5
MD
8761 }
8762
0480f69a 8763 if (mmu_idx == ARMMMUIdx_S2NS) {
9d1bab33
PM
8764 /* HCR.DC means HCR.VM behaves as 1 */
8765 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
0480f69a 8766 }
3d0e3080
PM
8767
8768 if (env->cp15.hcr_el2 & HCR_TGE) {
8769 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
8770 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
8771 return true;
8772 }
8773 }
8774
9d1bab33
PM
8775 if ((env->cp15.hcr_el2 & HCR_DC) &&
8776 (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
8777 /* HCR.DC means SCTLR_EL1.M behaves as 0 */
8778 return true;
8779 }
8780
0480f69a
PM
8781 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
8782}
8783
73462ddd
PC
8784static inline bool regime_translation_big_endian(CPUARMState *env,
8785 ARMMMUIdx mmu_idx)
8786{
8787 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
8788}
8789
c47eaf9f
PM
8790/* Return the TTBR associated with this translation regime */
8791static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
8792 int ttbrn)
8793{
8794 if (mmu_idx == ARMMMUIdx_S2NS) {
8795 return env->cp15.vttbr_el2;
8796 }
8797 if (ttbrn == 0) {
8798 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
8799 } else {
8800 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
8801 }
8802}
8803
8804#endif /* !CONFIG_USER_ONLY */
8805
0480f69a
PM
8806/* Return the TCR controlling this translation regime */
8807static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
8808{
8809 if (mmu_idx == ARMMMUIdx_S2NS) {
68e9c2fe 8810 return &env->cp15.vtcr_el2;
0480f69a
PM
8811 }
8812 return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
8813}
8814
8bd5c820
PM
8815/* Convert a possible stage1+2 MMU index into the appropriate
8816 * stage 1 MMU index
8817 */
8818static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
8819{
8820 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8821 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
8822 }
8823 return mmu_idx;
8824}
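/* A worked example (illustrative; it relies on the enum spacing the
 * arithmetic above assumes): ARMMMUIdx_S12NSE1 + (ARMMMUIdx_S1NSE0 -
 * ARMMMUIdx_S12NSE0) == ARMMMUIdx_S1NSE1, so the combined stage 1+2
 * regimes collapse to their stage 1 counterparts, while every other
 * index is returned unchanged.
 */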
8825
0480f69a
PM
8826/* Return true if the translation regime is using LPAE format page tables */
8827static inline bool regime_using_lpae_format(CPUARMState *env,
8828 ARMMMUIdx mmu_idx)
8829{
8830 int el = regime_el(env, mmu_idx);
8831 if (el == 2 || arm_el_is_aa64(env, el)) {
8832 return true;
8833 }
8834 if (arm_feature(env, ARM_FEATURE_LPAE)
8835 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
8836 return true;
8837 }
8838 return false;
8839}
8840
deb2db99
AR
8841/* Returns true if the stage 1 translation regime is using LPAE format page
8842 * tables. Used when raising alignment exceptions, whose FSR changes depending
8843 * on whether the long or short descriptor format is in use. */
8844bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
30901475 8845{
8bd5c820 8846 mmu_idx = stage_1_mmu_idx(mmu_idx);
deb2db99 8847
30901475
AB
8848 return regime_using_lpae_format(env, mmu_idx);
8849}
8850
c47eaf9f 8851#ifndef CONFIG_USER_ONLY
0480f69a
PM
8852static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
8853{
8854 switch (mmu_idx) {
8855 case ARMMMUIdx_S1SE0:
8856 case ARMMMUIdx_S1NSE0:
e7b921c2 8857 case ARMMMUIdx_MUser:
871bec7c 8858 case ARMMMUIdx_MSUser:
62593718
PM
8859 case ARMMMUIdx_MUserNegPri:
8860 case ARMMMUIdx_MSUserNegPri:
0480f69a
PM
8861 return true;
8862 default:
8863 return false;
8864 case ARMMMUIdx_S12NSE0:
8865 case ARMMMUIdx_S12NSE1:
8866 g_assert_not_reached();
8867 }
8868}
8869
0fbf5238
AJ
8870/* Translate section/page access permissions to page
8871 * R/W protection flags
d76951b6
AJ
8872 *
8873 * @env: CPUARMState
8874 * @mmu_idx: MMU index indicating required translation regime
8875 * @ap: The 3-bit access permissions (AP[2:0])
8876 * @domain_prot: The 2-bit domain access permissions
0fbf5238
AJ
8877 */
8878static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
8879 int ap, int domain_prot)
8880{
554b0b09
PM
8881 bool is_user = regime_is_user(env, mmu_idx);
8882
8883 if (domain_prot == 3) {
8884 return PAGE_READ | PAGE_WRITE;
8885 }
8886
554b0b09
PM
8887 switch (ap) {
8888 case 0:
8889 if (arm_feature(env, ARM_FEATURE_V7)) {
8890 return 0;
8891 }
554b0b09
PM
8892 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
8893 case SCTLR_S:
8894 return is_user ? 0 : PAGE_READ;
8895 case SCTLR_R:
8896 return PAGE_READ;
8897 default:
8898 return 0;
8899 }
8900 case 1:
8901 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8902 case 2:
87c3d486 8903 if (is_user) {
0fbf5238 8904 return PAGE_READ;
87c3d486 8905 } else {
554b0b09 8906 return PAGE_READ | PAGE_WRITE;
87c3d486 8907 }
554b0b09
PM
8908 case 3:
8909 return PAGE_READ | PAGE_WRITE;
8910 case 4: /* Reserved. */
8911 return 0;
8912 case 5:
0fbf5238 8913 return is_user ? 0 : PAGE_READ;
554b0b09 8914 case 6:
0fbf5238 8915 return PAGE_READ;
554b0b09 8916 case 7:
87c3d486 8917 if (!arm_feature(env, ARM_FEATURE_V6K)) {
554b0b09 8918 return 0;
87c3d486 8919 }
0fbf5238 8920 return PAGE_READ;
554b0b09 8921 default:
0fbf5238 8922 g_assert_not_reached();
554b0b09 8923 }
b5ff1b31
FB
8924}
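/* A summary of the decode above, for illustration only (v6K-style
 * AP[2:0] with SCTLR.S and SCTLR.R clear assumed):
 *   AP=1: priv RW, user none    AP=2: priv RW, user RO
 *   AP=3: RW at any privilege   AP=5: priv RO, user none
 *   AP=6: RO at any privilege   AP=7: as AP=6, but only from v6K on
 */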
8925
d76951b6
AJ
8926/* Translate section/page access permissions to page
8927 * R/W protection flags.
8928 *
d76951b6 8929 * @ap: The 2-bit simple AP (AP[2:1])
d8e052b3 8930 * @is_user: TRUE if accessing from PL0
d76951b6 8931 */
d8e052b3 8932static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
d76951b6 8933{
d76951b6
AJ
8934 switch (ap) {
8935 case 0:
8936 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8937 case 1:
8938 return PAGE_READ | PAGE_WRITE;
8939 case 2:
8940 return is_user ? 0 : PAGE_READ;
8941 case 3:
8942 return PAGE_READ;
8943 default:
8944 g_assert_not_reached();
8945 }
8946}
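/* For illustration, the full simple-AP truth table implemented above:
 *   AP[2:1]=0: priv RW, user none    AP[2:1]=1: RW at any privilege
 *   AP[2:1]=2: priv RO, user none    AP[2:1]=3: RO at any privilege
 */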
8947
d8e052b3
AJ
8948static inline int
8949simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
8950{
8951 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
8952}
8953
6ab1a5ee
EI
8954/* Translate S2 section/page access permissions to protection flags
8955 *
8956 * @env: CPUARMState
8957 * @s2ap: The 2-bit stage2 access permissions (S2AP)
8958 * @xn: XN (execute-never) bit
8959 */
8960static int get_S2prot(CPUARMState *env, int s2ap, int xn)
8961{
8962 int prot = 0;
8963
8964 if (s2ap & 1) {
8965 prot |= PAGE_READ;
8966 }
8967 if (s2ap & 2) {
8968 prot |= PAGE_WRITE;
8969 }
8970 if (!xn) {
dfda6837
SS
8971 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
8972 prot |= PAGE_EXEC;
8973 }
6ab1a5ee
EI
8974 }
8975 return prot;
8976}
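/* A worked example (values chosen for illustration): S2AP=3 with XN
 * clear yields PAGE_READ | PAGE_WRITE | PAGE_EXEC, while S2AP=2 with
 * XN clear under an AArch32 EL2 yields PAGE_WRITE only, because
 * execution from a non-readable stage 2 page is only permitted when
 * EL2 is AArch64 (the arm_el_is_aa64() test above).
 */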
8977
d8e052b3
AJ
8978/* Translate section/page access permissions to protection flags
8979 *
8980 * @env: CPUARMState
8981 * @mmu_idx: MMU index indicating required translation regime
8982 * @is_aa64: TRUE if AArch64
8983 * @ap: The 2-bit simple AP (AP[2:1])
8984 * @ns: NS (non-secure) bit
8985 * @xn: XN (execute-never) bit
8986 * @pxn: PXN (privileged execute-never) bit
8987 */
8988static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
8989 int ap, int ns, int xn, int pxn)
8990{
8991 bool is_user = regime_is_user(env, mmu_idx);
8992 int prot_rw, user_rw;
8993 bool have_wxn;
8994 int wxn = 0;
8995
8996 assert(mmu_idx != ARMMMUIdx_S2NS);
8997
8998 user_rw = simple_ap_to_rw_prot_is_user(ap, true);
8999 if (is_user) {
9000 prot_rw = user_rw;
9001 } else {
9002 prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
9003 }
9004
9005 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
9006 return prot_rw;
9007 }
9008
9009 /* TODO have_wxn should be replaced with
9010 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
9011 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
9012 * compatible processors have EL2, which is required for [U]WXN.
9013 */
9014 have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
9015
9016 if (have_wxn) {
9017 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
9018 }
9019
9020 if (is_aa64) {
9021 switch (regime_el(env, mmu_idx)) {
9022 case 1:
9023 if (!is_user) {
9024 xn = pxn || (user_rw & PAGE_WRITE);
9025 }
9026 break;
9027 case 2:
9028 case 3:
9029 break;
9030 }
9031 } else if (arm_feature(env, ARM_FEATURE_V7)) {
9032 switch (regime_el(env, mmu_idx)) {
9033 case 1:
9034 case 3:
9035 if (is_user) {
9036 xn = xn || !(user_rw & PAGE_READ);
9037 } else {
9038 int uwxn = 0;
9039 if (have_wxn) {
9040 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
9041 }
9042 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
9043 (uwxn && (user_rw & PAGE_WRITE));
9044 }
9045 break;
9046 case 2:
9047 break;
9048 }
9049 } else {
9050 xn = wxn = 0;
9051 }
9052
9053 if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
9054 return prot_rw;
9055 }
9056 return prot_rw | PAGE_EXEC;
9057}
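/* A worked example of the WXN interaction (illustrative): at AArch64
 * EL1 with SCTLR.WXN set, a page with AP[2:1]=1 (writable at EL0 and
 * EL1) has PAGE_WRITE in user_rw, so xn is forced on for privileged
 * accesses and the wxn test strips PAGE_EXEC from unprivileged ones:
 * writable memory is never executable in that configuration.
 */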
9058
0480f69a
PM
9059static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
9060 uint32_t *table, uint32_t address)
b2fa1797 9061{
0480f69a 9062 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
0480f69a 9063 TCR *tcr = regime_tcr(env, mmu_idx);
11f136ee 9064
11f136ee
FA
9065 if (address & tcr->mask) {
9066 if (tcr->raw_tcr & TTBCR_PD1) {
e389be16
FA
9067 /* Translation table walk disabled for TTBR1 */
9068 return false;
9069 }
aef878be 9070 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
e389be16 9071 } else {
11f136ee 9072 if (tcr->raw_tcr & TTBCR_PD0) {
e389be16
FA
9073 /* Translation table walk disabled for TTBR0 */
9074 return false;
9075 }
aef878be 9076 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
e389be16
FA
9077 }
9078 *table |= (address >> 18) & 0x3ffc;
9079 return true;
b2fa1797
PB
9080}
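/* A worked example (TTBCR.N == 2 assumed): tcr->mask covers the top
 * two VA bits, so 0xc0001000 selects TTBR1 (unless TTBCR.PD1 disables
 * that walk) while 0x20001000 falls through to TTBR0, whose table is
 * then only 2^(14-2) == 4KB, so base_mask keeps bits [31:12] of TTBR0.
 */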
9081
37785977
EI
9082/* Translate a S1 pagetable walk through S2 if needed. */
9083static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
9084 hwaddr addr, MemTxAttrs txattrs,
37785977
EI
9085 ARMMMUFaultInfo *fi)
9086{
9087 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
9088 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
9089 target_ulong s2size;
9090 hwaddr s2pa;
9091 int s2prot;
9092 int ret;
eadb2feb
PM
9093 ARMCacheAttrs cacheattrs = {};
9094 ARMCacheAttrs *pcacheattrs = NULL;
9095
9096 if (env->cp15.hcr_el2 & HCR_PTW) {
9097 /*
9098 * PTW means we must fault if this S1 walk touches S2 Device
9099 * memory; otherwise we don't care about the attributes and can
9100 * save the S2 translation the effort of computing them.
9101 */
9102 pcacheattrs = &cacheattrs;
9103 }
37785977
EI
9104
9105 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
eadb2feb 9106 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
37785977 9107 if (ret) {
3b39d734 9108 assert(fi->type != ARMFault_None);
37785977
EI
9109 fi->s2addr = addr;
9110 fi->stage2 = true;
9111 fi->s1ptw = true;
9112 return ~0;
9113 }
eadb2feb
PM
9114 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
9115 /* Access was to Device memory: generate Permission fault */
9116 fi->type = ARMFault_Permission;
9117 fi->s2addr = addr;
9118 fi->stage2 = true;
9119 fi->s1ptw = true;
9120 return ~0;
9121 }
37785977
EI
9122 addr = s2pa;
9123 }
9124 return addr;
9125}
9126
14577270 9127/* All loads done in the course of a page table walk go through here. */
a614e698 9128static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
3795a6de 9129 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
ebca90e4 9130{
a614e698
EI
9131 ARMCPU *cpu = ARM_CPU(cs);
9132 CPUARMState *env = &cpu->env;
ebca90e4 9133 MemTxAttrs attrs = {};
3b39d734 9134 MemTxResult result = MEMTX_OK;
5ce4ff65 9135 AddressSpace *as;
3b39d734 9136 uint32_t data;
ebca90e4
PM
9137
9138 attrs.secure = is_secure;
5ce4ff65 9139 as = arm_addressspace(cs, attrs);
3795a6de 9140 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
a614e698
EI
9141 if (fi->s1ptw) {
9142 return 0;
9143 }
73462ddd 9144 if (regime_translation_big_endian(env, mmu_idx)) {
3b39d734 9145 data = address_space_ldl_be(as, addr, attrs, &result);
73462ddd 9146 } else {
3b39d734 9147 data = address_space_ldl_le(as, addr, attrs, &result);
73462ddd 9148 }
3b39d734
PM
9149 if (result == MEMTX_OK) {
9150 return data;
9151 }
9152 fi->type = ARMFault_SyncExternalOnWalk;
9153 fi->ea = arm_extabort_type(result);
9154 return 0;
ebca90e4
PM
9155}
9156
37785977 9157static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
3795a6de 9158 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
ebca90e4 9159{
37785977
EI
9160 ARMCPU *cpu = ARM_CPU(cs);
9161 CPUARMState *env = &cpu->env;
ebca90e4 9162 MemTxAttrs attrs = {};
3b39d734 9163 MemTxResult result = MEMTX_OK;
5ce4ff65 9164 AddressSpace *as;
9aea1ea3 9165 uint64_t data;
ebca90e4
PM
9166
9167 attrs.secure = is_secure;
5ce4ff65 9168 as = arm_addressspace(cs, attrs);
3795a6de 9169 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
37785977
EI
9170 if (fi->s1ptw) {
9171 return 0;
9172 }
73462ddd 9173 if (regime_translation_big_endian(env, mmu_idx)) {
3b39d734 9174 data = address_space_ldq_be(as, addr, attrs, &result);
73462ddd 9175 } else {
3b39d734
PM
9176 data = address_space_ldq_le(as, addr, attrs, &result);
9177 }
9178 if (result == MEMTX_OK) {
9179 return data;
73462ddd 9180 }
3b39d734
PM
9181 fi->type = ARMFault_SyncExternalOnWalk;
9182 fi->ea = arm_extabort_type(result);
9183 return 0;
ebca90e4
PM
9184}
9185
b7cc4e82 9186static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
03ae85f8 9187 MMUAccessType access_type, ARMMMUIdx mmu_idx,
b7cc4e82 9188 hwaddr *phys_ptr, int *prot,
f989983e 9189 target_ulong *page_size,
e14b5a23 9190 ARMMMUFaultInfo *fi)
b5ff1b31 9191{
2fc0cc0e 9192 CPUState *cs = env_cpu(env);
f989983e 9193 int level = 1;
b5ff1b31
FB
9194 uint32_t table;
9195 uint32_t desc;
9196 int type;
9197 int ap;
e389be16 9198 int domain = 0;
dd4ebc2e 9199 int domain_prot;
a8170e5e 9200 hwaddr phys_addr;
0480f69a 9201 uint32_t dacr;
b5ff1b31 9202
9ee6e8bb
PB
9203 /* Pagetable walk. */
9204 /* Lookup l1 descriptor. */
0480f69a 9205 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
e389be16 9206 /* Section translation fault if page walk is disabled by PD0 or PD1 */
f989983e 9207 fi->type = ARMFault_Translation;
e389be16
FA
9208 goto do_fault;
9209 }
a614e698 9210 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
3795a6de 9211 mmu_idx, fi);
3b39d734
PM
9212 if (fi->type != ARMFault_None) {
9213 goto do_fault;
9214 }
9ee6e8bb 9215 type = (desc & 3);
dd4ebc2e 9216 domain = (desc >> 5) & 0x0f;
0480f69a
PM
9217 if (regime_el(env, mmu_idx) == 1) {
9218 dacr = env->cp15.dacr_ns;
9219 } else {
9220 dacr = env->cp15.dacr_s;
9221 }
9222 domain_prot = (dacr >> (domain * 2)) & 3;
9ee6e8bb 9223 if (type == 0) {
601d70b9 9224 /* Section translation fault. */
f989983e 9225 fi->type = ARMFault_Translation;
9ee6e8bb
PB
9226 goto do_fault;
9227 }
f989983e
PM
9228 if (type != 2) {
9229 level = 2;
9230 }
dd4ebc2e 9231 if (domain_prot == 0 || domain_prot == 2) {
f989983e 9232 fi->type = ARMFault_Domain;
9ee6e8bb
PB
9233 goto do_fault;
9234 }
9235 if (type == 2) {
9236 /* 1Mb section. */
9237 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
9238 ap = (desc >> 10) & 3;
d4c430a8 9239 *page_size = 1024 * 1024;
9ee6e8bb
PB
9240 } else {
9241 /* Lookup l2 entry. */
554b0b09
PM
9242 if (type == 1) {
9243 /* Coarse pagetable. */
9244 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
9245 } else {
9246 /* Fine pagetable. */
9247 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
9248 }
a614e698 9249 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
3795a6de 9250 mmu_idx, fi);
3b39d734
PM
9251 if (fi->type != ARMFault_None) {
9252 goto do_fault;
9253 }
9ee6e8bb
PB
9254 switch (desc & 3) {
9255 case 0: /* Page translation fault. */
f989983e 9256 fi->type = ARMFault_Translation;
9ee6e8bb
PB
9257 goto do_fault;
9258 case 1: /* 64k page. */
9259 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
9260 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
d4c430a8 9261 *page_size = 0x10000;
ce819861 9262 break;
9ee6e8bb
PB
9263 case 2: /* 4k page. */
9264 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
c10f7fc3 9265 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
d4c430a8 9266 *page_size = 0x1000;
ce819861 9267 break;
fc1891c7 9268 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
554b0b09 9269 if (type == 1) {
fc1891c7
PM
9270 /* ARMv6/XScale extended small page format */
9271 if (arm_feature(env, ARM_FEATURE_XSCALE)
9272 || arm_feature(env, ARM_FEATURE_V6)) {
554b0b09 9273 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
fc1891c7 9274 *page_size = 0x1000;
554b0b09 9275 } else {
fc1891c7
PM
9276 /* UNPREDICTABLE in ARMv5; we choose to take a
9277 * page translation fault.
9278 */
f989983e 9279 fi->type = ARMFault_Translation;
554b0b09
PM
9280 goto do_fault;
9281 }
9282 } else {
9283 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
fc1891c7 9284 *page_size = 0x400;
554b0b09 9285 }
9ee6e8bb 9286 ap = (desc >> 4) & 3;
ce819861
PB
9287 break;
9288 default:
9ee6e8bb
PB
9289 /* Never happens, but compiler isn't smart enough to tell. */
9290 abort();
ce819861 9291 }
9ee6e8bb 9292 }
0fbf5238
AJ
9293 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
9294 *prot |= *prot ? PAGE_EXEC : 0;
9295 if (!(*prot & (1 << access_type))) {
9ee6e8bb 9296 /* Access permission fault. */
f989983e 9297 fi->type = ARMFault_Permission;
9ee6e8bb
PB
9298 goto do_fault;
9299 }
9300 *phys_ptr = phys_addr;
b7cc4e82 9301 return false;
9ee6e8bb 9302do_fault:
f989983e
PM
9303 fi->domain = domain;
9304 fi->level = level;
b7cc4e82 9305 return true;
9ee6e8bb
PB
9306}
9307
b7cc4e82 9308static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
03ae85f8 9309 MMUAccessType access_type, ARMMMUIdx mmu_idx,
b7cc4e82 9310 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
f06cf243 9311 target_ulong *page_size, ARMMMUFaultInfo *fi)
9ee6e8bb 9312{
2fc0cc0e 9313 CPUState *cs = env_cpu(env);
f06cf243 9314 int level = 1;
9ee6e8bb
PB
9315 uint32_t table;
9316 uint32_t desc;
9317 uint32_t xn;
de9b05b8 9318 uint32_t pxn = 0;
9ee6e8bb
PB
9319 int type;
9320 int ap;
de9b05b8 9321 int domain = 0;
dd4ebc2e 9322 int domain_prot;
a8170e5e 9323 hwaddr phys_addr;
0480f69a 9324 uint32_t dacr;
8bf5b6a9 9325 bool ns;
9ee6e8bb
PB
9326
9327 /* Pagetable walk. */
9328 /* Lookup l1 descriptor. */
0480f69a 9329 if (!get_level1_table_address(env, mmu_idx, &table, address)) {
e389be16 9330 /* Section translation fault if page walk is disabled by PD0 or PD1 */
f06cf243 9331 fi->type = ARMFault_Translation;
e389be16
FA
9332 goto do_fault;
9333 }
a614e698 9334 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
3795a6de 9335 mmu_idx, fi);
3b39d734
PM
9336 if (fi->type != ARMFault_None) {
9337 goto do_fault;
9338 }
9ee6e8bb 9339 type = (desc & 3);
de9b05b8
PM
9340 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
9341 /* Section translation fault, or attempt to use the encoding
9342 * which is Reserved on implementations without PXN.
9343 */
f06cf243 9344 fi->type = ARMFault_Translation;
9ee6e8bb 9345 goto do_fault;
de9b05b8
PM
9346 }
9347 if ((type == 1) || !(desc & (1 << 18))) {
9348 /* Page or Section. */
dd4ebc2e 9349 domain = (desc >> 5) & 0x0f;
9ee6e8bb 9350 }
0480f69a
PM
9351 if (regime_el(env, mmu_idx) == 1) {
9352 dacr = env->cp15.dacr_ns;
9353 } else {
9354 dacr = env->cp15.dacr_s;
9355 }
f06cf243
PM
9356 if (type == 1) {
9357 level = 2;
9358 }
0480f69a 9359 domain_prot = (dacr >> (domain * 2)) & 3;
dd4ebc2e 9360 if (domain_prot == 0 || domain_prot == 2) {
f06cf243
PM
9361 /* Section or Page domain fault */
9362 fi->type = ARMFault_Domain;
9ee6e8bb
PB
9363 goto do_fault;
9364 }
de9b05b8 9365 if (type != 1) {
9ee6e8bb
PB
9366 if (desc & (1 << 18)) {
9367 /* Supersection. */
9368 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
4e42a6ca
SF
9369 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
9370 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
d4c430a8 9371 *page_size = 0x1000000;
b5ff1b31 9372 } else {
9ee6e8bb
PB
9373 /* Section. */
9374 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
d4c430a8 9375 *page_size = 0x100000;
b5ff1b31 9376 }
9ee6e8bb
PB
9377 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
9378 xn = desc & (1 << 4);
de9b05b8 9379 pxn = desc & 1;
8bf5b6a9 9380 ns = extract32(desc, 19, 1);
9ee6e8bb 9381 } else {
de9b05b8
PM
9382 if (arm_feature(env, ARM_FEATURE_PXN)) {
9383 pxn = (desc >> 2) & 1;
9384 }
8bf5b6a9 9385 ns = extract32(desc, 3, 1);
9ee6e8bb
PB
9386 /* Lookup l2 entry. */
9387 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
a614e698 9388 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
3795a6de 9389 mmu_idx, fi);
3b39d734
PM
9390 if (fi->type != ARMFault_None) {
9391 goto do_fault;
9392 }
9ee6e8bb
PB
9393 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
9394 switch (desc & 3) {
9395 case 0: /* Page translation fault. */
f06cf243 9396 fi->type = ARMFault_Translation;
b5ff1b31 9397 goto do_fault;
9ee6e8bb
PB
9398 case 1: /* 64k page. */
9399 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
9400 xn = desc & (1 << 15);
d4c430a8 9401 *page_size = 0x10000;
9ee6e8bb
PB
9402 break;
9403 case 2: case 3: /* 4k page. */
9404 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
9405 xn = desc & 1;
d4c430a8 9406 *page_size = 0x1000;
9ee6e8bb
PB
9407 break;
9408 default:
9409 /* Never happens, but compiler isn't smart enough to tell. */
9410 abort();
b5ff1b31 9411 }
9ee6e8bb 9412 }
dd4ebc2e 9413 if (domain_prot == 3) {
c0034328
JR
9414 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9415 } else {
0480f69a 9416 if (pxn && !regime_is_user(env, mmu_idx)) {
de9b05b8
PM
9417 xn = 1;
9418 }
f06cf243
PM
9419 if (xn && access_type == MMU_INST_FETCH) {
9420 fi->type = ARMFault_Permission;
c0034328 9421 goto do_fault;
f06cf243 9422 }
9ee6e8bb 9423
d76951b6
AJ
9424 if (arm_feature(env, ARM_FEATURE_V6K) &&
9425 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
9426 /* The simplified model uses AP[0] as an access control bit. */
9427 if ((ap & 1) == 0) {
9428 /* Access flag fault. */
f06cf243 9429 fi->type = ARMFault_AccessFlag;
d76951b6
AJ
9430 goto do_fault;
9431 }
9432 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
9433 } else {
9434 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
c0034328 9435 }
0fbf5238
AJ
9436 if (*prot && !xn) {
9437 *prot |= PAGE_EXEC;
9438 }
9439 if (!(*prot & (1 << access_type))) {
c0034328 9440 /* Access permission fault. */
f06cf243 9441 fi->type = ARMFault_Permission;
c0034328
JR
9442 goto do_fault;
9443 }
3ad493fc 9444 }
8bf5b6a9
PM
9445 if (ns) {
9446 /* The NS bit will (as required by the architecture) have no effect if
9447 * the CPU doesn't support TZ or this is a non-secure translation
9448 * regime, because the attribute will already be non-secure.
9449 */
9450 attrs->secure = false;
9451 }
9ee6e8bb 9452 *phys_ptr = phys_addr;
b7cc4e82 9453 return false;
b5ff1b31 9454do_fault:
f06cf243
PM
9455 fi->domain = domain;
9456 fi->level = level;
b7cc4e82 9457 return true;
b5ff1b31
FB
9458}
9459
1853d5a9 9460/*
a0e966c9 9461 * check_s2_mmu_setup
1853d5a9
EI
9462 * @cpu: ARMCPU
9463 * @is_aa64: True if the translation regime is in AArch64 state
 9464 * @level: Suggested starting level
9465 * @inputsize: Bitsize of IPAs
9466 * @stride: Page-table stride (See the ARM ARM)
9467 *
a0e966c9
EI
9468 * Returns true if the suggested S2 translation parameters are OK and
9469 * false otherwise.
1853d5a9 9470 */
a0e966c9
EI
9471static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
9472 int inputsize, int stride)
1853d5a9 9473{
98d68ec2
EI
9474 const int grainsize = stride + 3;
9475 int startsizecheck;
9476
1853d5a9
EI
9477 /* Negative levels are never allowed. */
9478 if (level < 0) {
9479 return false;
9480 }
9481
98d68ec2
EI
9482 startsizecheck = inputsize - ((3 - level) * stride + grainsize);
9483 if (startsizecheck < 1 || startsizecheck > stride + 4) {
9484 return false;
9485 }
9486
1853d5a9 9487 if (is_aa64) {
3526423e 9488 CPUARMState *env = &cpu->env;
1853d5a9
EI
9489 unsigned int pamax = arm_pamax(cpu);
9490
9491 switch (stride) {
9492 case 13: /* 64KB Pages. */
9493 if (level == 0 || (level == 1 && pamax <= 42)) {
9494 return false;
9495 }
9496 break;
9497 case 11: /* 16KB Pages. */
9498 if (level == 0 || (level == 1 && pamax <= 40)) {
9499 return false;
9500 }
9501 break;
9502 case 9: /* 4KB Pages. */
9503 if (level == 0 && pamax <= 42) {
9504 return false;
9505 }
9506 break;
9507 default:
9508 g_assert_not_reached();
9509 }
3526423e
EI
9510
9511 /* Inputsize checks. */
9512 if (inputsize > pamax &&
9513 (arm_el_is_aa64(env, 1) || inputsize > 40)) {
9514 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
9515 return false;
9516 }
1853d5a9 9517 } else {
1853d5a9
EI
9518 /* AArch32 only supports 4KB pages. Assert on that. */
9519 assert(stride == 9);
9520
9521 if (level == 0) {
9522 return false;
9523 }
1853d5a9
EI
9524 }
9525 return true;
9526}
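/* A worked example of the size check above (numbers for illustration):
 * 4KB pages (stride == 9, grainsize == 12), suggested level 1 and a
 * 40-bit IPA give startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10,
 * inside the permitted 1..13 (stride + 4) window, so the combination
 * is accepted provided the pamax checks also pass.
 */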
9527
5b2d261d
AB
9528/* Translate from the 4-bit stage 2 representation of
9529 * memory attributes (without cache-allocation hints) to
9530 * the 8-bit representation of the stage 1 MAIR registers
9531 * (which includes allocation hints).
9532 *
9533 * ref: shared/translation/attrs/S2AttrDecode()
9534 * .../S2ConvertAttrsHints()
9535 */
9536static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
9537{
9538 uint8_t hiattr = extract32(s2attrs, 2, 2);
9539 uint8_t loattr = extract32(s2attrs, 0, 2);
9540 uint8_t hihint = 0, lohint = 0;
9541
9542 if (hiattr != 0) { /* normal memory */
9543 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
9544 hiattr = loattr = 1; /* non-cacheable */
9545 } else {
9546 if (hiattr != 1) { /* Write-through or write-back */
9547 hihint = 3; /* RW allocate */
9548 }
9549 if (loattr != 1) { /* Write-through or write-back */
9550 lohint = 3; /* RW allocate */
9551 }
9552 }
9553 }
9554
9555 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
9556}
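/* Two worked examples of the conversion: s2attrs == 0xf (write-back,
 * outer and inner) becomes 0xff (write-back with RW-allocate hints in
 * both halves), while the same input with HCR_EL2.CD set collapses to
 * 0x44 (non-cacheable, no allocation hints).
 */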
c47eaf9f 9557#endif /* !CONFIG_USER_ONLY */
5b2d261d 9558
e737ed2a
RH
9559ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
9560 ARMMMUIdx mmu_idx)
ba97be9f
RH
9561{
9562 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
9563 uint32_t el = regime_el(env, mmu_idx);
8220af7e 9564 bool tbi, tbid, epd, hpd, using16k, using64k;
ba97be9f
RH
9565 int select, tsz;
9566
9567 /*
9568 * Bit 55 is always between the two regions, and is canonical for
9569 * determining if address tagging is enabled.
9570 */
9571 select = extract64(va, 55, 1);
9572
9573 if (el > 1) {
9574 tsz = extract32(tcr, 0, 6);
9575 using64k = extract32(tcr, 14, 1);
9576 using16k = extract32(tcr, 15, 1);
9577 if (mmu_idx == ARMMMUIdx_S2NS) {
9578 /* VTCR_EL2 */
8220af7e 9579 tbi = tbid = hpd = false;
ba97be9f
RH
9580 } else {
9581 tbi = extract32(tcr, 20, 1);
9582 hpd = extract32(tcr, 24, 1);
8220af7e 9583 tbid = extract32(tcr, 29, 1);
ba97be9f
RH
9584 }
9585 epd = false;
9586 } else if (!select) {
9587 tsz = extract32(tcr, 0, 6);
9588 epd = extract32(tcr, 7, 1);
9589 using64k = extract32(tcr, 14, 1);
9590 using16k = extract32(tcr, 15, 1);
9591 tbi = extract64(tcr, 37, 1);
9592 hpd = extract64(tcr, 41, 1);
8220af7e 9593 tbid = extract64(tcr, 51, 1);
ba97be9f
RH
9594 } else {
9595 int tg = extract32(tcr, 30, 2);
9596 using16k = tg == 1;
9597 using64k = tg == 3;
9598 tsz = extract32(tcr, 16, 6);
9599 epd = extract32(tcr, 23, 1);
9600 tbi = extract64(tcr, 38, 1);
9601 hpd = extract64(tcr, 42, 1);
8220af7e 9602 tbid = extract64(tcr, 52, 1);
ba97be9f
RH
9603 }
9604 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */
9605 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */
9606
9607 return (ARMVAParameters) {
9608 .tsz = tsz,
9609 .select = select,
9610 .tbi = tbi,
8220af7e 9611 .tbid = tbid,
ba97be9f
RH
9612 .epd = epd,
9613 .hpd = hpd,
9614 .using16k = using16k,
9615 .using64k = using64k,
9616 };
9617}
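/* For illustration: at EL1, va == 0x0000000012345000 has bit 55 clear
 * and so returns the TTBR0 parameters (T0SZ/TG0/TBI0 etc), while
 * va == 0xffff800000000000 has bit 55 set and returns the TTBR1
 * fields; bit 55 rather than bit 63 is used so the choice is stable
 * regardless of whatever tagging (TBI) is in force.
 */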
9618
e737ed2a
RH
9619ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
9620 ARMMMUIdx mmu_idx, bool data)
9621{
8220af7e
RH
9622 ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);
9623
9624 /* Present TBI as a composite with TBID. */
9625 ret.tbi &= (data || !ret.tbid);
9626 return ret;
e737ed2a
RH
9627}
9628
c47eaf9f 9629#ifndef CONFIG_USER_ONLY
ba97be9f
RH
9630static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
9631 ARMMMUIdx mmu_idx)
9632{
9633 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
9634 uint32_t el = regime_el(env, mmu_idx);
9635 int select, tsz;
9636 bool epd, hpd;
9637
9638 if (mmu_idx == ARMMMUIdx_S2NS) {
9639 /* VTCR */
9640 bool sext = extract32(tcr, 4, 1);
9641 bool sign = extract32(tcr, 3, 1);
9642
9643 /*
9644 * If the sign-extend bit is not the same as t0sz[3], the result
9645 * is unpredictable. Flag this as a guest error.
9646 */
9647 if (sign != sext) {
9648 qemu_log_mask(LOG_GUEST_ERROR,
9649 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
9650 }
9651 tsz = sextract32(tcr, 0, 4) + 8;
9652 select = 0;
9653 hpd = false;
9654 epd = false;
9655 } else if (el == 2) {
9656 /* HTCR */
9657 tsz = extract32(tcr, 0, 3);
9658 select = 0;
9659 hpd = extract64(tcr, 24, 1);
9660 epd = false;
9661 } else {
9662 int t0sz = extract32(tcr, 0, 3);
9663 int t1sz = extract32(tcr, 16, 3);
9664
9665 if (t1sz == 0) {
9666 select = va > (0xffffffffu >> t0sz);
9667 } else {
9668 /* Note that we will detect errors later. */
9669 select = va >= ~(0xffffffffu >> t1sz);
9670 }
9671 if (!select) {
9672 tsz = t0sz;
9673 epd = extract32(tcr, 7, 1);
9674 hpd = extract64(tcr, 41, 1);
9675 } else {
9676 tsz = t1sz;
9677 epd = extract32(tcr, 23, 1);
9678 hpd = extract64(tcr, 42, 1);
9679 }
9680 /* For aarch32, hpd0 is not enabled without t2e as well. */
9681 hpd &= extract32(tcr, 6, 1);
9682 }
9683
9684 return (ARMVAParameters) {
9685 .tsz = tsz,
9686 .select = select,
9687 .epd = epd,
9688 .hpd = hpd,
9689 };
9690}
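/* A worked example of the select logic above: with t0sz == 0 and
 * t1sz == 1, ~(0xffffffffu >> 1) == 0x80000000, so any VA at or above
 * 2GB picks the TTBR1 parameters and everything below it picks TTBR0.
 */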
9691
b7cc4e82 9692static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
03ae85f8 9693 MMUAccessType access_type, ARMMMUIdx mmu_idx,
b7cc4e82 9694 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
da909b2c 9695 target_ulong *page_size_ptr,
5b2d261d 9696 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
3dde962f 9697{
2fc0cc0e 9698 ARMCPU *cpu = env_archcpu(env);
1853d5a9 9699 CPUState *cs = CPU(cpu);
3dde962f 9700 /* Read an LPAE long-descriptor translation table. */
da909b2c 9701 ARMFaultType fault_type = ARMFault_Translation;
1b4093ea 9702 uint32_t level;
ba97be9f 9703 ARMVAParameters param;
3dde962f 9704 uint64_t ttbr;
dddb5223 9705 hwaddr descaddr, indexmask, indexmask_grainsize;
3dde962f 9706 uint32_t tableattrs;
36d820af 9707 target_ulong page_size;
3dde962f 9708 uint32_t attrs;
ba97be9f
RH
9709 int32_t stride;
9710 int addrsize, inputsize;
0480f69a 9711 TCR *tcr = regime_tcr(env, mmu_idx);
d8e052b3 9712 int ap, ns, xn, pxn;
88e8add8 9713 uint32_t el = regime_el(env, mmu_idx);
ba97be9f 9714 bool ttbr1_valid;
6109769a 9715 uint64_t descaddrmask;
6e99f762 9716 bool aarch64 = arm_el_is_aa64(env, el);
1bafc2ba 9717 bool guarded = false;
0480f69a
PM
9718
9719 /* TODO:
88e8add8
GB
9720 * This code does not handle the different format TCR for VTCR_EL2.
9721 * This code also does not support shareability levels.
9722 * Attribute and permission bit handling should also be checked when adding
9723 * support for those page table walks.
0480f69a 9724 */
6e99f762 9725 if (aarch64) {
ba97be9f
RH
9726 param = aa64_va_parameters(env, address, mmu_idx,
9727 access_type != MMU_INST_FETCH);
1b4093ea 9728 level = 0;
88e8add8
GB
9729 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
9730 * invalid.
9731 */
ba97be9f
RH
9732 ttbr1_valid = (el < 2);
9733 addrsize = 64 - 8 * param.tbi;
9734 inputsize = 64 - param.tsz;
d0a2cbce 9735 } else {
ba97be9f 9736 param = aa32_va_parameters(env, address, mmu_idx);
1b4093ea 9737 level = 1;
d0a2cbce 9738 /* There is no TTBR1 for EL2 */
ba97be9f
RH
9739 ttbr1_valid = (el != 2);
9740 addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
9741 inputsize = addrsize - param.tsz;
2c8dd318 9742 }
3dde962f 9743
ba97be9f
RH
9744 /*
9745 * We determined the region when collecting the parameters, but we
 9746 * have not yet checked that the address actually lies within it.
9747 * Extract the top bits and verify that they all match select.
36d820af
RH
9748 *
9749 * For aa32, if inputsize == addrsize, then we have selected the
9750 * region by exclusion in aa32_va_parameters and there is no more
9751 * validation to do here.
9752 */
9753 if (inputsize < addrsize) {
9754 target_ulong top_bits = sextract64(address, inputsize,
9755 addrsize - inputsize);
9756 if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
9757 /* The gap between the two regions is a Translation fault */
9758 fault_type = ARMFault_Translation;
9759 goto do_fault;
9760 }
3dde962f
PM
9761 }
9762
ba97be9f
RH
9763 if (param.using64k) {
9764 stride = 13;
9765 } else if (param.using16k) {
9766 stride = 11;
9767 } else {
9768 stride = 9;
9769 }
9770
3dde962f
PM
9771 /* Note that QEMU ignores shareability and cacheability attributes,
9772 * so we don't need to do anything with the SH, ORGN, IRGN fields
9773 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
9774 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
9775 * implement any ASID-like capability so we can ignore it (instead
9776 * we will always flush the TLB any time the ASID is changed).
9777 */
ba97be9f 9778 ttbr = regime_ttbr(env, mmu_idx, param.select);
3dde962f 9779
0480f69a 9780 /* Here we should have set up all the parameters for the translation:
6e99f762 9781 * inputsize, ttbr, epd, stride, tbi
0480f69a
PM
9782 */
9783
ba97be9f 9784 if (param.epd) {
88e8add8
GB
9785 /* Translation table walk disabled => Translation fault on TLB miss
9786 * Note: This is always 0 on 64-bit EL2 and EL3.
9787 */
3dde962f
PM
9788 goto do_fault;
9789 }
9790
1853d5a9
EI
9791 if (mmu_idx != ARMMMUIdx_S2NS) {
9792 /* The starting level depends on the virtual address size (which can
9793 * be up to 48 bits) and the translation granule size. It indicates
9794 * the number of strides (stride bits at a time) needed to
9795 * consume the bits of the input address. In the pseudocode this is:
9796 * level = 4 - RoundUp((inputsize - grainsize) / stride)
9797 * where their 'inputsize' is our 'inputsize', 'grainsize' is
9798 * our 'stride + 3' and 'stride' is our 'stride'.
9799 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
9800 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
9801 * = 4 - (inputsize - 4) / stride;
9802 */
9803 level = 4 - (inputsize - 4) / stride;
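        /* Worked example: a 4KB granule (stride == 9) and a 48-bit
         * inputsize give level = 4 - (48 - 4) / 9 = 4 - 4 = 0, i.e.
         * the walk starts at level 0, matching the architectural
         * four-level walk for 48-bit VAs with 4KB pages.
         */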
9804 } else {
9805 /* For stage 2 translations the starting level is specified by the
9806 * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
9807 */
1b4093ea
SS
9808 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
9809 uint32_t startlevel;
1853d5a9
EI
9810 bool ok;
9811
6e99f762 9812 if (!aarch64 || stride == 9) {
1853d5a9 9813 /* AArch32 or 4KB pages */
1b4093ea 9814 startlevel = 2 - sl0;
1853d5a9
EI
9815 } else {
9816 /* 16KB or 64KB pages */
1b4093ea 9817 startlevel = 3 - sl0;
1853d5a9
EI
9818 }
9819
9820 /* Check that the starting level is valid. */
6e99f762 9821 ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1b4093ea 9822 inputsize, stride);
1853d5a9 9823 if (!ok) {
da909b2c 9824 fault_type = ARMFault_Translation;
1853d5a9
EI
9825 goto do_fault;
9826 }
1b4093ea 9827 level = startlevel;
1853d5a9 9828 }
3dde962f 9829
dddb5223
SS
9830 indexmask_grainsize = (1ULL << (stride + 3)) - 1;
9831 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
3dde962f
PM
9832
9833 /* Now we can extract the actual base address from the TTBR */
2c8dd318 9834 descaddr = extract64(ttbr, 0, 48);
dddb5223 9835 descaddr &= ~indexmask;
3dde962f 9836
6109769a 9837 /* The address field in the descriptor goes up to bit 39 for ARMv7
dddb5223
SS
 9838 * and up to bit 47 for ARMv8; we use the descaddrmask only
 9839 * up to bit 39 for AArch32 because the other bits are not needed there
 9840 * to construct the next descriptor address (they should all be zero anyway).
6109769a 9841 */
6e99f762 9842 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
dddb5223 9843 ~indexmask_grainsize;
6109769a 9844
ebca90e4
PM
9845 /* Secure accesses start with the page table in secure memory and
9846 * can be downgraded to non-secure at any step. Non-secure accesses
9847 * remain non-secure. We implement this by just ORing in the NSTable/NS
9848 * bits at each step.
9849 */
9850 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
3dde962f
PM
9851 for (;;) {
9852 uint64_t descriptor;
ebca90e4 9853 bool nstable;
3dde962f 9854
dddb5223 9855 descaddr |= (address >> (stride * (4 - level))) & indexmask;
2c8dd318 9856 descaddr &= ~7ULL;
ebca90e4 9857 nstable = extract32(tableattrs, 4, 1);
3795a6de 9858 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
3b39d734 9859 if (fi->type != ARMFault_None) {
37785977
EI
9860 goto do_fault;
9861 }
9862
3dde962f
PM
9863 if (!(descriptor & 1) ||
9864 (!(descriptor & 2) && (level == 3))) {
9865 /* Invalid, or the Reserved level 3 encoding */
9866 goto do_fault;
9867 }
6109769a 9868 descaddr = descriptor & descaddrmask;
3dde962f
PM
9869
9870 if ((descriptor & 2) && (level < 3)) {
037c13c5 9871 /* Table entry. The top five bits are attributes which may
3dde962f
PM
9872 * propagate down through lower levels of the table (and
9873 * which are all arranged so that 0 means "no effect", so
9874 * we can gather them up by ORing in the bits at each level).
9875 */
9876 tableattrs |= extract64(descriptor, 59, 5);
9877 level++;
dddb5223 9878 indexmask = indexmask_grainsize;
3dde962f
PM
9879 continue;
9880 }
9881 /* Block entry at level 1 or 2, or page entry at level 3.
9882 * These are basically the same thing, although the number
9883 * of bits we pull in from the vaddr varies.
9884 */
973a5434 9885 page_size = (1ULL << ((stride * (4 - level)) + 3));
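        /* For example, with a 4KB granule (stride == 9): a page entry
         * at level 3 gives 1ULL << 12 == 4KB, a block at level 2
         * gives 2MB and a block at level 1 gives 1GB.
         */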
3dde962f 9886 descaddr |= (address & (page_size - 1));
6ab1a5ee 9887 /* Extract attributes from the descriptor */
d615efac
IC
9888 attrs = extract64(descriptor, 2, 10)
9889 | (extract64(descriptor, 52, 12) << 10);
6ab1a5ee
EI
9890
9891 if (mmu_idx == ARMMMUIdx_S2NS) {
9892 /* Stage 2 table descriptors do not include any attribute fields */
9893 break;
9894 }
9895 /* Merge in attributes from table descriptors */
037c13c5 9896 attrs |= nstable << 3; /* NS */
1bafc2ba 9897 guarded = extract64(descriptor, 50, 1); /* GP */
ba97be9f 9898 if (param.hpd) {
037c13c5
RH
9899 /* HPD disables all the table attributes except NSTable. */
9900 break;
9901 }
9902 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
3dde962f
PM
9903 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
9904 * means "force PL1 access only", which means forcing AP[1] to 0.
9905 */
037c13c5
RH
9906 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
9907 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
3dde962f
PM
9908 break;
9909 }
9910 /* Here descaddr is the final physical address, and attributes
9911 * are all in attrs.
9912 */
da909b2c 9913 fault_type = ARMFault_AccessFlag;
3dde962f
PM
9914 if ((attrs & (1 << 8)) == 0) {
9915 /* Access flag */
9916 goto do_fault;
9917 }
d8e052b3
AJ
9918
9919 ap = extract32(attrs, 4, 2);
d8e052b3 9920 xn = extract32(attrs, 12, 1);
d8e052b3 9921
6ab1a5ee
EI
9922 if (mmu_idx == ARMMMUIdx_S2NS) {
9923 ns = true;
9924 *prot = get_S2prot(env, ap, xn);
9925 } else {
9926 ns = extract32(attrs, 3, 1);
9927 pxn = extract32(attrs, 11, 1);
6e99f762 9928 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
6ab1a5ee 9929 }
d8e052b3 9930
da909b2c 9931 fault_type = ARMFault_Permission;
d8e052b3 9932 if (!(*prot & (1 << access_type))) {
3dde962f
PM
9933 goto do_fault;
9934 }
3dde962f 9935
8bf5b6a9
PM
9936 if (ns) {
9937 /* The NS bit will (as required by the architecture) have no effect if
9938 * the CPU doesn't support TZ or this is a non-secure translation
9939 * regime, because the attribute will already be non-secure.
9940 */
9941 txattrs->secure = false;
9942 }
1bafc2ba
RH
9943 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
9944 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
9945 txattrs->target_tlb_bit0 = true;
9946 }
5b2d261d
AB
9947
9948 if (cacheattrs != NULL) {
9949 if (mmu_idx == ARMMMUIdx_S2NS) {
9950 cacheattrs->attrs = convert_stage2_attrs(env,
9951 extract32(attrs, 0, 4));
9952 } else {
9953 /* Index into MAIR registers for cache attributes */
9954 uint8_t attrindx = extract32(attrs, 0, 3);
9955 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
9956 assert(attrindx <= 7);
9957 cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
9958 }
9959 cacheattrs->shareability = extract32(attrs, 6, 2);
9960 }
9961
3dde962f
PM
9962 *phys_ptr = descaddr;
9963 *page_size_ptr = page_size;
b7cc4e82 9964 return false;
3dde962f
PM
9965
9966do_fault:
da909b2c
PM
9967 fi->type = fault_type;
9968 fi->level = level;
37785977
EI
9969 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
9970 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
b7cc4e82 9971 return true;
3dde962f
PM
9972}
9973
f6bda88f
PC
9974static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
9975 ARMMMUIdx mmu_idx,
9976 int32_t address, int *prot)
9977{
3a00d560
MD
9978 if (!arm_feature(env, ARM_FEATURE_M)) {
9979 *prot = PAGE_READ | PAGE_WRITE;
9980 switch (address) {
9981 case 0xF0000000 ... 0xFFFFFFFF:
9982 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
9983 /* hivecs execing is ok */
9984 *prot |= PAGE_EXEC;
9985 }
9986 break;
9987 case 0x00000000 ... 0x7FFFFFFF:
f6bda88f 9988 *prot |= PAGE_EXEC;
3a00d560
MD
9989 break;
9990 }
9991 } else {
9992 /* Default system address map for M profile cores.
9993 * The architecture specifies which regions are execute-never;
9994 * at the MPU level no other checks are defined.
9995 */
9996 switch (address) {
9997 case 0x00000000 ... 0x1fffffff: /* ROM */
9998 case 0x20000000 ... 0x3fffffff: /* SRAM */
9999 case 0x60000000 ... 0x7fffffff: /* RAM */
10000 case 0x80000000 ... 0x9fffffff: /* RAM */
10001 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10002 break;
10003 case 0x40000000 ... 0x5fffffff: /* Peripheral */
10004 case 0xa0000000 ... 0xbfffffff: /* Device */
10005 case 0xc0000000 ... 0xdfffffff: /* Device */
10006 case 0xe0000000 ... 0xffffffff: /* System */
10007 *prot = PAGE_READ | PAGE_WRITE;
10008 break;
10009 default:
10010 g_assert_not_reached();
f6bda88f 10011 }
f6bda88f 10012 }
f6bda88f
PC
10013}
10014
29c483a5
MD
10015static bool pmsav7_use_background_region(ARMCPU *cpu,
10016 ARMMMUIdx mmu_idx, bool is_user)
10017{
10018 /* Return true if we should use the default memory map as a
10019 * "background" region if there are no hits against any MPU regions.
10020 */
10021 CPUARMState *env = &cpu->env;
10022
10023 if (is_user) {
10024 return false;
10025 }
10026
10027 if (arm_feature(env, ARM_FEATURE_M)) {
ecf5e8ea
PM
10028 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
10029 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
29c483a5
MD
10030 } else {
10031 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
10032 }
10033}
10034
38aaa60c
PM
10035static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
10036{
10037 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
10038 return arm_feature(env, ARM_FEATURE_M) &&
10039 extract32(address, 20, 12) == 0xe00;
10040}
10041
bf446a11
PM
10042static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
10043{
10044 /* True if address is in the M profile system region
10045 * 0xe0000000 - 0xffffffff
10046 */
10047 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
10048}
10049
f6bda88f 10050static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
03ae85f8 10051 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9375ad15 10052 hwaddr *phys_ptr, int *prot,
e5e40999 10053 target_ulong *page_size,
9375ad15 10054 ARMMMUFaultInfo *fi)
f6bda88f 10055{
2fc0cc0e 10056 ARMCPU *cpu = env_archcpu(env);
f6bda88f
PC
10057 int n;
10058 bool is_user = regime_is_user(env, mmu_idx);
10059
10060 *phys_ptr = address;
e5e40999 10061 *page_size = TARGET_PAGE_SIZE;
f6bda88f
PC
10062 *prot = 0;
10063
38aaa60c
PM
10064 if (regime_translation_disabled(env, mmu_idx) ||
10065 m_is_ppb_region(env, address)) {
10066 /* MPU disabled or M profile PPB access: use default memory map.
10067 * The other case which uses the default memory map in the
10068 * v7M ARM ARM pseudocode is exception vector reads from the vector
10069 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
10070 * which always does a direct read using address_space_ldl(), rather
10071 * than going via this function, so we don't need to check that here.
10072 */
f6bda88f
PC
10073 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10074 } else { /* MPU enabled */
10075 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
10076 /* region search */
10077 uint32_t base = env->pmsav7.drbar[n];
10078 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
10079 uint32_t rmask;
10080 bool srdis = false;
10081
10082 if (!(env->pmsav7.drsr[n] & 0x1)) {
10083 continue;
10084 }
10085
10086 if (!rsize) {
c9f9f124
MD
10087 qemu_log_mask(LOG_GUEST_ERROR,
10088 "DRSR[%d]: Rsize field cannot be 0\n", n);
f6bda88f
PC
10089 continue;
10090 }
10091 rsize++;
10092 rmask = (1ull << rsize) - 1;
10093
10094 if (base & rmask) {
c9f9f124
MD
10095 qemu_log_mask(LOG_GUEST_ERROR,
10096 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
10097 "to DRSR region size, mask = 0x%" PRIx32 "\n",
10098 n, base, rmask);
f6bda88f
PC
10099 continue;
10100 }
10101
10102 if (address < base || address > base + rmask) {
9d2b5a58
PM
10103 /*
10104 * Address not in this region. We must check whether the
10105 * region covers addresses in the same page as our address.
10106 * In that case we must not report a size that covers the
10107 * whole page for a subsequent hit against a different MPU
10108 * region or the background region, because it would result in
10109 * incorrect TLB hits for subsequent accesses to addresses that
10110 * are in this MPU region.
10111 */
10112 if (ranges_overlap(base, rmask,
10113 address & TARGET_PAGE_MASK,
10114 TARGET_PAGE_SIZE)) {
10115 *page_size = 1;
10116 }
f6bda88f
PC
10117 continue;
10118 }
10119
10120 /* Region matched */
10121
10122 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
10123 int i, snd;
10124 uint32_t srdis_mask;
10125
10126 rsize -= 3; /* sub region size (power of 2) */
10127 snd = ((address - base) >> rsize) & 0x7;
10128 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
10129
10130 srdis_mask = srdis ? 0x3 : 0x0;
10131 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
 10132 /* This will check, in groups of 2, 4 and then 8, whether
10133 * the subregion bits are consistent. rsize is incremented
10134 * back up to give the region size, considering consistent
10135 * adjacent subregions as one region. Stop testing if rsize
10136 * is already big enough for an entire QEMU page.
10137 */
10138 int snd_rounded = snd & ~(i - 1);
10139 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
10140 snd_rounded + 8, i);
10141 if (srdis_mask ^ srdis_multi) {
10142 break;
10143 }
10144 srdis_mask = (srdis_mask << i) | srdis_mask;
10145 rsize++;
10146 }
10147 }
f6bda88f
PC
10148 if (srdis) {
10149 continue;
10150 }
e5e40999
PM
10151 if (rsize < TARGET_PAGE_BITS) {
10152 *page_size = 1 << rsize;
10153 }
f6bda88f
PC
10154 break;
10155 }
10156
10157 if (n == -1) { /* no hits */
29c483a5 10158 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
f6bda88f 10159 /* background fault */
9375ad15 10160 fi->type = ARMFault_Background;
f6bda88f
PC
10161 return true;
10162 }
10163 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
 10164 } else { /* an MPU hit! */
10165 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
bf446a11
PM
10166 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
10167
10168 if (m_is_system_region(env, address)) {
10169 /* System space is always execute never */
10170 xn = 1;
10171 }
f6bda88f
PC
10172
10173 if (is_user) { /* User mode AP bit decoding */
10174 switch (ap) {
10175 case 0:
10176 case 1:
10177 case 5:
10178 break; /* no access */
10179 case 3:
10180 *prot |= PAGE_WRITE;
10181 /* fall through */
10182 case 2:
10183 case 6:
10184 *prot |= PAGE_READ | PAGE_EXEC;
10185 break;
8638f1ad
PM
10186 case 7:
10187 /* for v7M, same as 6; for R profile a reserved value */
10188 if (arm_feature(env, ARM_FEATURE_M)) {
10189 *prot |= PAGE_READ | PAGE_EXEC;
10190 break;
10191 }
10192 /* fall through */
f6bda88f
PC
10193 default:
10194 qemu_log_mask(LOG_GUEST_ERROR,
c9f9f124
MD
10195 "DRACR[%d]: Bad value for AP bits: 0x%"
10196 PRIx32 "\n", n, ap);
f6bda88f
PC
10197 }
10198 } else { /* Priv. mode AP bits decoding */
10199 switch (ap) {
10200 case 0:
10201 break; /* no access */
10202 case 1:
10203 case 2:
10204 case 3:
10205 *prot |= PAGE_WRITE;
10206 /* fall through */
10207 case 5:
10208 case 6:
10209 *prot |= PAGE_READ | PAGE_EXEC;
10210 break;
8638f1ad
PM
10211 case 7:
10212 /* for v7M, same as 6; for R profile a reserved value */
10213 if (arm_feature(env, ARM_FEATURE_M)) {
10214 *prot |= PAGE_READ | PAGE_EXEC;
10215 break;
10216 }
10217 /* fall through */
f6bda88f
PC
10218 default:
10219 qemu_log_mask(LOG_GUEST_ERROR,
c9f9f124
MD
10220 "DRACR[%d]: Bad value for AP bits: 0x%"
10221 PRIx32 "\n", n, ap);
f6bda88f
PC
10222 }
10223 }
10224
10225 /* execute never */
bf446a11 10226 if (xn) {
f6bda88f
PC
10227 *prot &= ~PAGE_EXEC;
10228 }
10229 }
10230 }
10231
9375ad15
PM
10232 fi->type = ARMFault_Permission;
10233 fi->level = 1;
f6bda88f
PC
10234 return !(*prot & (1 << access_type));
10235}
10236
35337cc3
PM
10237static bool v8m_is_sau_exempt(CPUARMState *env,
10238 uint32_t address, MMUAccessType access_type)
10239{
10240 /* The architecture specifies that certain address ranges are
10241 * exempt from v8M SAU/IDAU checks.
10242 */
10243 return
10244 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
10245 (address >= 0xe0000000 && address <= 0xe0002fff) ||
10246 (address >= 0xe000e000 && address <= 0xe000efff) ||
10247 (address >= 0xe002e000 && address <= 0xe002efff) ||
10248 (address >= 0xe0040000 && address <= 0xe0041fff) ||
10249 (address >= 0xe00ff000 && address <= 0xe00fffff);
10250}
10251
787a7e76 10252void v8m_security_lookup(CPUARMState *env, uint32_t address,
35337cc3
PM
10253 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10254 V8M_SAttributes *sattrs)
10255{
10256 /* Look up the security attributes for this address. Compare the
10257 * pseudocode SecurityCheck() function.
10258 * We assume the caller has zero-initialized *sattrs.
10259 */
2fc0cc0e 10260 ARMCPU *cpu = env_archcpu(env);
35337cc3 10261 int r;
181962fd
PM
10262 bool idau_exempt = false, idau_ns = true, idau_nsc = true;
10263 int idau_region = IREGION_NOTVALID;
72042435
PM
10264 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
10265 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
35337cc3 10266
181962fd
PM
10267 if (cpu->idau) {
10268 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
10269 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
10270
10271 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
10272 &idau_nsc);
10273 }
35337cc3
PM
10274
10275 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
10276 /* 0xf0000000..0xffffffff is always S for insn fetches */
10277 return;
10278 }
10279
181962fd 10280 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
35337cc3
PM
10281 sattrs->ns = !regime_is_secure(env, mmu_idx);
10282 return;
10283 }
10284
181962fd
PM
10285 if (idau_region != IREGION_NOTVALID) {
10286 sattrs->irvalid = true;
10287 sattrs->iregion = idau_region;
10288 }
10289
35337cc3
PM
10290 switch (env->sau.ctrl & 3) {
10291 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
10292 break;
10293 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
10294 sattrs->ns = true;
10295 break;
10296 default: /* SAU.ENABLE == 1 */
10297 for (r = 0; r < cpu->sau_sregion; r++) {
10298 if (env->sau.rlar[r] & 1) {
10299 uint32_t base = env->sau.rbar[r] & ~0x1f;
10300 uint32_t limit = env->sau.rlar[r] | 0x1f;
10301
10302 if (base <= address && limit >= address) {
72042435
PM
10303 if (base > addr_page_base || limit < addr_page_limit) {
10304 sattrs->subpage = true;
10305 }
35337cc3
PM
10306 if (sattrs->srvalid) {
10307 /* If we hit in more than one region then we must report
10308 * as Secure, not NS-Callable, with no valid region
10309 * number info.
10310 */
10311 sattrs->ns = false;
10312 sattrs->nsc = false;
10313 sattrs->sregion = 0;
10314 sattrs->srvalid = false;
10315 break;
10316 } else {
10317 if (env->sau.rlar[r] & 2) {
10318 sattrs->nsc = true;
10319 } else {
10320 sattrs->ns = true;
10321 }
10322 sattrs->srvalid = true;
10323 sattrs->sregion = r;
10324 }
9d2b5a58
PM
10325 } else {
10326 /*
10327 * Address not in this region. We must check whether the
10328 * region covers addresses in the same page as our address.
10329 * In that case we must not report a size that covers the
10330 * whole page for a subsequent hit against a different MPU
10331 * region or the background region, because it would result
10332 * in incorrect TLB hits for subsequent accesses to
10333 * addresses that are in this MPU region.
10334 */
10335 if (limit >= base &&
10336 ranges_overlap(base, limit - base + 1,
10337 addr_page_base,
10338 TARGET_PAGE_SIZE)) {
10339 sattrs->subpage = true;
10340 }
35337cc3
PM
10341 }
10342 }
10343 }
7e3f1223
TR
10344 break;
10345 }
35337cc3 10346
7e3f1223
TR
10347 /*
10348 * The IDAU will override the SAU lookup results if it specifies
10349 * higher security than the SAU does.
10350 */
10351 if (!idau_ns) {
10352 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
10353 sattrs->ns = false;
10354 sattrs->nsc = idau_nsc;
181962fd 10355 }
35337cc3
PM
10356 }
10357}

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
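
/*
 * Illustrative sketch (not used by the lookup above) of the two subpage
 * rules it applies: a hit that doesn't cover the whole TARGET_PAGE, and
 * a miss against a region that still overlaps the page, must both force
 * a 1-byte TLB entry. The region bounds here are hypothetical.
 */
static G_GNUC_UNUSED bool mpu_region_forces_subpage_example(uint32_t address)
{
    uint32_t base = 0x20000100;   /* hypothetical region start */
    uint32_t limit = 0x200001ff;  /* hypothetical region end (inclusive) */
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (address >= base && address <= limit) {
        /* Hit: subpage unless the region covers the entire page */
        return base > addr_page_base || limit < addr_page_limit;
    }
    /* Miss: subpage if the region nonetheless overlaps this page */
    return ranges_overlap(base, limit - base + 1,
                          addr_page_base, TARGET_PAGE_SIZE);
}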

static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
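
/*
 * Worked example of the PMSAv5 region size decode above (hypothetical
 * register value): bits [5:1] of the region register hold a size
 * exponent N, and the region covers 2^(N+1) bytes. The shift is split
 * in two in the function above to avoid an undefined (1 << 32) for the
 * maximal region.
 */
static G_GNUC_UNUSED void pmsav5_mask_example(void)
{
    uint32_t region = 0x20000000 | (11 << 1) | 1; /* enabled, N = 11 */
    uint32_t mask = 1u << ((region >> 1) & 0x1f);  /* 1 << 11 */

    mask = (mask << 1) - 1;                        /* 2^12 - 1 */
    assert(mask == 0xfff);                         /* a 4KB region */
}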

/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
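
/*
 * Worked examples of the nibble combination above (illustrative only):
 * stage 1 write-back with allocation hints (0xf) against stage 2
 * write-through (0xa) yields write-through with the stage 1 hints kept
 * (0xb), and non-cacheable (0x4) always wins.
 */
static G_GNUC_UNUSED void combine_nibble_example(void)
{
    assert(combine_cacheattr_nibble(0xf, 0xa) == 0xb);
    assert(combine_cacheattr_nibble(0x4, 0xf) == 0x4);
}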

/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1: Attributes from stage 1 walk
 * @s2: Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
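
/*
 * Worked example of the full combine above (illustrative only,
 * hypothetical attribute values): Normal write-back (0xff) at stage 1
 * against Device-nGnRE (0x04) at stage 2 combines to Device-nGnRE,
 * forced Outer Shareable.
 */
static G_GNUC_UNUSED void combine_cacheattrs_example(void)
{
    ARMCacheAttrs s1 = { .attrs = 0xff, .shareability = 0 };
    ARMCacheAttrs s2 = { .attrs = 0x04, .shareability = 0 };
    ARMCacheAttrs ret = combine_cacheattrs(s1, s2);

    /* Device wins, and Device memory is always Outer Shareable */
    assert(ret.attrs == 0x04 && ret.shareability == 2);
}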

/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fault info
 * provides information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return false;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
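
/*
 * Quick illustration of the saturation behaviour above (illustrative
 * only): results clamp to the signed range instead of wrapping.
 */
static G_GNUC_UNUSED void saturating_arith_example(void)
{
    assert(add16_sat(0x7fff, 1) == 0x7fff);  /* would wrap to 0x8000 */
    assert(sub16_sat(0x8000, 1) == 0x8000);  /* would wrap to 0x7fff */
    assert(add8_sat(0x7f, 0x7f) == 0x7f);    /* would wrap to 0xfe */
}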

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
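
/*
 * Example of the helper above (illustrative only): each byte lane
 * contributes |a - b| to the sum, here 3 + 1 + 1 + 3.
 */
static G_GNUC_UNUSED void usad8_example(void)
{
    assert(helper_usad8(0x01020304, 0x04030201) == 8);
}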

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}
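
/*
 * Example of the helper above (illustrative only): GE flags 0b0101
 * select byte lanes 0 and 2 from a and the remaining lanes from b.
 */
static G_GNUC_UNUSED void sel_flags_example(void)
{
    assert(helper_sel_flags(0x5, 0x11223344, 0xaabbccdd) == 0xaa22cc44);
}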

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
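
/*
 * Usage sketch for the helper above (illustrative only): because the
 * pre/post complements cancel between calls, CRC-ing the two
 * little-endian halfwords of 0x11223344 one at a time accumulates to
 * the same value as CRC-ing all four bytes in one call. This holds for
 * any accumulator value; 0xffffffff is just a commonly used seed.
 */
static G_GNUC_UNUSED void crc32_helper_example(void)
{
    uint32_t acc = 0xffffffff;

    acc = helper_crc32(acc, 0x3344, 2);  /* low halfword first */
    acc = helper_crc32(acc, 0x1122, 2);

    assert(acc == helper_crc32(0xffffffff, 0x11223344, 4));
}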

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
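
/*
 * Worked example of the CPACR decode above (hypothetical register
 * value): the FPEN field lives in bits [21:20] of CPACR_EL1.
 */
static G_GNUC_UNUSED void cpacr_fpen_example(void)
{
    uint32_t cpacr_el1 = 1u << 20;             /* FPEN == 1 */
    int fpen = extract32(cpacr_el1, 20, 2);

    assert(fpen == 1);  /* trap only EL0 accesses */
}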

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    } else {
        return ARMMMUIdx_S12NSE0 + el;
    }
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif

static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}
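
/*
 * Sketch of the FIELD_DP32/FIELD_EX32 pattern used throughout the
 * hflags code (illustrative only): deposit a field into the flags
 * word, then extract it back.
 */
static G_GNUC_UNUSED void hflags_field_example(void)
{
    uint32_t flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    assert(FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) == 1);
}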

static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx, uint32_t flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    /* v8M always enables the fpu.  */
    flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}

static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }

    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
    uint64_t sctlr;
    int tbii, tbid;

    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

    /* FIXME: ARMv8.1-VHE S2 translation regime.  */
    if (regime_el(env, stage1) < 2) {
        ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
        tbid = (p1.tbi << 1) | p0.tbi;
        tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
    } else {
        tbid = p0.tbi;
        tbii = tbid & !p0.tbid;
    }

    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
    }

    sctlr = arm_sctlr(env, el);

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;
    uint32_t pstate_for_ss;

    *cs_base = 0;
    assert_hflags_rebuild_correctly(env);

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
        pstate_for_ss = env->pstate;
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        pstate_for_ss = env->uncached_cpsr;
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (pstate_for_ss & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}
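
/*
 * Illustrative mapping of the singlestep state machine table above
 * (not used by the translator):
 */
static G_GNUC_UNUSED const char *ss_state_name_example(bool ss_active,
                                                       bool pstate_ss)
{
    if (!ss_active) {
        return "Inactive";
    }
    return pstate_ss ? "Active-not-pending" : "Active-pending";
}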

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
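
/*
 * Worked example of the pmask computation above (illustrative only):
 * each quadword of vector length contributes 16 predicate bits, so for
 * vq == 2 only the low 32 bits of a predicate uint64_t remain valid.
 */
static G_GNUC_UNUSED void sve_pmask_example(void)
{
    unsigned vq = 2;
    uint64_t pmask = ~(-1ULL << (16 * (vq & 3)));

    assert(pmask == 0xffffffffULL);
}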

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif