]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/helper.c
target-arm: Add a feature flag for EL3
[mirror_qemu.git] / target-arm / helper.c
CommitLineData
b5ff1b31 1#include "cpu.h"
ccd38087 2#include "internals.h"
022c62cb 3#include "exec/gdbstub.h"
7b59220e 4#include "helper.h"
1de7afc9 5#include "qemu/host-utils.h"
78027bb6 6#include "sysemu/arch_init.h"
9c17d615 7#include "sysemu/sysemu.h"
1de7afc9 8#include "qemu/bitops.h"
eb0ecd5a
WN
9#include "qemu/crc32c.h"
10#include <zlib.h> /* For crc32 */
0b03bdfc 11
4a501606 12#ifndef CONFIG_USER_ONLY
aca3f40b
PM
13#include "exec/softmmu_exec.h"
14
2c8dd318 15static inline int get_phys_addr(CPUARMState *env, target_ulong address,
4a501606 16 int access_type, int is_user,
a8170e5e 17 hwaddr *phys_ptr, int *prot,
4a501606 18 target_ulong *page_size);
7c2cb42b
AF
19
20/* Definitions for the PMCCNTR and PMCR registers */
21#define PMCRD 0x8
22#define PMCRC 0x4
23#define PMCRE 0x1
4a501606
PM
24#endif
25
0ecb72a5 26static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
56aebc89
PB
27{
28 int nregs;
29
30 /* VFP data registers are always little-endian. */
31 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
32 if (reg < nregs) {
33 stfq_le_p(buf, env->vfp.regs[reg]);
34 return 8;
35 }
36 if (arm_feature(env, ARM_FEATURE_NEON)) {
37 /* Aliases for Q regs. */
38 nregs += 16;
39 if (reg < nregs) {
40 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
41 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
42 return 16;
43 }
44 }
45 switch (reg - nregs) {
46 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
47 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
48 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
49 }
50 return 0;
51}
52
0ecb72a5 53static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
56aebc89
PB
54{
55 int nregs;
56
57 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
58 if (reg < nregs) {
59 env->vfp.regs[reg] = ldfq_le_p(buf);
60 return 8;
61 }
62 if (arm_feature(env, ARM_FEATURE_NEON)) {
63 nregs += 16;
64 if (reg < nregs) {
65 env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
66 env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
67 return 16;
68 }
69 }
70 switch (reg - nregs) {
71 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
72 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
71b3c3de 73 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
56aebc89
PB
74 }
75 return 0;
76}
77
6a669427
PM
78static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
79{
80 switch (reg) {
81 case 0 ... 31:
82 /* 128 bit FP register */
83 stfq_le_p(buf, env->vfp.regs[reg * 2]);
84 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
85 return 16;
86 case 32:
87 /* FPSR */
88 stl_p(buf, vfp_get_fpsr(env));
89 return 4;
90 case 33:
91 /* FPCR */
92 stl_p(buf, vfp_get_fpcr(env));
93 return 4;
94 default:
95 return 0;
96 }
97}
98
99static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
100{
101 switch (reg) {
102 case 0 ... 31:
103 /* 128 bit FP register */
104 env->vfp.regs[reg * 2] = ldfq_le_p(buf);
105 env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
106 return 16;
107 case 32:
108 /* FPSR */
109 vfp_set_fpsr(env, ldl_p(buf));
110 return 4;
111 case 33:
112 /* FPCR */
113 vfp_set_fpcr(env, ldl_p(buf));
114 return 4;
115 default:
116 return 0;
117 }
118}
119
c4241c7d 120static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
d4e6df63 121{
67ed771d 122 if (cpreg_field_is_64bit(ri)) {
c4241c7d 123 return CPREG_FIELD64(env, ri);
22d9e1a9 124 } else {
c4241c7d 125 return CPREG_FIELD32(env, ri);
22d9e1a9 126 }
d4e6df63
PM
127}
128
c4241c7d
PM
129static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
130 uint64_t value)
d4e6df63 131{
67ed771d 132 if (cpreg_field_is_64bit(ri)) {
22d9e1a9
PM
133 CPREG_FIELD64(env, ri) = value;
134 } else {
135 CPREG_FIELD32(env, ri) = value;
136 }
d4e6df63
PM
137}
138
59a1c327 139static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
721fae12 140{
59a1c327 141 /* Raw read of a coprocessor register (as needed for migration, etc). */
721fae12 142 if (ri->type & ARM_CP_CONST) {
59a1c327 143 return ri->resetvalue;
721fae12 144 } else if (ri->raw_readfn) {
59a1c327 145 return ri->raw_readfn(env, ri);
721fae12 146 } else if (ri->readfn) {
59a1c327 147 return ri->readfn(env, ri);
721fae12 148 } else {
59a1c327 149 return raw_read(env, ri);
721fae12 150 }
721fae12
PM
151}
152
59a1c327 153static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
7900e9f1 154 uint64_t v)
721fae12
PM
155{
156 /* Raw write of a coprocessor register (as needed for migration, etc).
721fae12
PM
157 * Note that constant registers are treated as write-ignored; the
158 * caller should check for success by whether a readback gives the
159 * value written.
160 */
161 if (ri->type & ARM_CP_CONST) {
59a1c327 162 return;
721fae12 163 } else if (ri->raw_writefn) {
c4241c7d 164 ri->raw_writefn(env, ri, v);
721fae12 165 } else if (ri->writefn) {
c4241c7d 166 ri->writefn(env, ri, v);
721fae12 167 } else {
afb2530f 168 raw_write(env, ri, v);
721fae12 169 }
721fae12
PM
170}
171
172bool write_cpustate_to_list(ARMCPU *cpu)
173{
174 /* Write the coprocessor state from cpu->env to the (index,value) list. */
175 int i;
176 bool ok = true;
177
178 for (i = 0; i < cpu->cpreg_array_len; i++) {
179 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
180 const ARMCPRegInfo *ri;
59a1c327 181
60322b39 182 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
721fae12
PM
183 if (!ri) {
184 ok = false;
185 continue;
186 }
187 if (ri->type & ARM_CP_NO_MIGRATE) {
188 continue;
189 }
59a1c327 190 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
721fae12
PM
191 }
192 return ok;
193}
194
195bool write_list_to_cpustate(ARMCPU *cpu)
196{
197 int i;
198 bool ok = true;
199
200 for (i = 0; i < cpu->cpreg_array_len; i++) {
201 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
202 uint64_t v = cpu->cpreg_values[i];
721fae12
PM
203 const ARMCPRegInfo *ri;
204
60322b39 205 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
721fae12
PM
206 if (!ri) {
207 ok = false;
208 continue;
209 }
210 if (ri->type & ARM_CP_NO_MIGRATE) {
211 continue;
212 }
213 /* Write value and confirm it reads back as written
214 * (to catch read-only registers and partially read-only
215 * registers where the incoming migration value doesn't match)
216 */
59a1c327
PM
217 write_raw_cp_reg(&cpu->env, ri, v);
218 if (read_raw_cp_reg(&cpu->env, ri) != v) {
721fae12
PM
219 ok = false;
220 }
221 }
222 return ok;
223}
224
225static void add_cpreg_to_list(gpointer key, gpointer opaque)
226{
227 ARMCPU *cpu = opaque;
228 uint64_t regidx;
229 const ARMCPRegInfo *ri;
230
231 regidx = *(uint32_t *)key;
60322b39 232 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
721fae12
PM
233
234 if (!(ri->type & ARM_CP_NO_MIGRATE)) {
235 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
236 /* The value array need not be initialized at this point */
237 cpu->cpreg_array_len++;
238 }
239}
240
241static void count_cpreg(gpointer key, gpointer opaque)
242{
243 ARMCPU *cpu = opaque;
244 uint64_t regidx;
245 const ARMCPRegInfo *ri;
246
247 regidx = *(uint32_t *)key;
60322b39 248 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
721fae12
PM
249
250 if (!(ri->type & ARM_CP_NO_MIGRATE)) {
251 cpu->cpreg_array_len++;
252 }
253}
254
255static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
256{
cbf239b7
AR
257 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
258 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
721fae12 259
cbf239b7
AR
260 if (aidx > bidx) {
261 return 1;
262 }
263 if (aidx < bidx) {
264 return -1;
265 }
266 return 0;
721fae12
PM
267}
268
82a3a118
PM
269static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
270{
271 GList **plist = udata;
272
273 *plist = g_list_prepend(*plist, key);
274}
275
721fae12
PM
276void init_cpreg_list(ARMCPU *cpu)
277{
278 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
279 * Note that we require cpreg_tuples[] to be sorted by key ID.
280 */
82a3a118 281 GList *keys = NULL;
721fae12
PM
282 int arraylen;
283
82a3a118
PM
284 g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);
285
721fae12
PM
286 keys = g_list_sort(keys, cpreg_key_compare);
287
288 cpu->cpreg_array_len = 0;
289
290 g_list_foreach(keys, count_cpreg, cpu);
291
292 arraylen = cpu->cpreg_array_len;
293 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
294 cpu->cpreg_values = g_new(uint64_t, arraylen);
295 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
296 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
297 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
298 cpu->cpreg_array_len = 0;
299
300 g_list_foreach(keys, add_cpreg_to_list, cpu);
301
302 assert(cpu->cpreg_array_len == arraylen);
303
304 g_list_free(keys);
305}
306
014406b5
PM
307/* Return true if extended addresses are enabled.
308 * This is always the case if our translation regime is 64 bit,
309 * but depends on TTBCR.EAE for 32 bit.
310 */
311static inline bool extended_addresses_enabled(CPUARMState *env)
312{
313 return arm_el_is_aa64(env, 1)
314 || ((arm_feature(env, ARM_FEATURE_LPAE)
315 && (env->cp15.c2_control & (1U << 31))));
316}
317
c4241c7d 318static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
c983fe6c 319{
00c8cb0a
AF
320 ARMCPU *cpu = arm_env_get_cpu(env);
321
c983fe6c 322 env->cp15.c3 = value;
00c8cb0a 323 tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
c983fe6c
PM
324}
325
c4241c7d 326static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
08de207b 327{
00c8cb0a
AF
328 ARMCPU *cpu = arm_env_get_cpu(env);
329
08de207b
PM
330 if (env->cp15.c13_fcse != value) {
331 /* Unlike real hardware the qemu TLB uses virtual addresses,
332 * not modified virtual addresses, so this causes a TLB flush.
333 */
00c8cb0a 334 tlb_flush(CPU(cpu), 1);
08de207b
PM
335 env->cp15.c13_fcse = value;
336 }
08de207b 337}
c4241c7d
PM
338
339static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
340 uint64_t value)
08de207b 341{
00c8cb0a
AF
342 ARMCPU *cpu = arm_env_get_cpu(env);
343
014406b5
PM
344 if (env->cp15.contextidr_el1 != value && !arm_feature(env, ARM_FEATURE_MPU)
345 && !extended_addresses_enabled(env)) {
08de207b
PM
346 /* For VMSA (when not using the LPAE long descriptor page table
347 * format) this register includes the ASID, so do a TLB flush.
348 * For PMSA it is purely a process ID and no action is needed.
349 */
00c8cb0a 350 tlb_flush(CPU(cpu), 1);
08de207b 351 }
014406b5 352 env->cp15.contextidr_el1 = value;
08de207b
PM
353}
354
c4241c7d
PM
355static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
356 uint64_t value)
d929823f
PM
357{
358 /* Invalidate all (TLBIALL) */
00c8cb0a
AF
359 ARMCPU *cpu = arm_env_get_cpu(env);
360
361 tlb_flush(CPU(cpu), 1);
d929823f
PM
362}
363
c4241c7d
PM
364static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
365 uint64_t value)
d929823f
PM
366{
367 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
31b030d4
AF
368 ARMCPU *cpu = arm_env_get_cpu(env);
369
370 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
d929823f
PM
371}
372
c4241c7d
PM
373static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
374 uint64_t value)
d929823f
PM
375{
376 /* Invalidate by ASID (TLBIASID) */
00c8cb0a
AF
377 ARMCPU *cpu = arm_env_get_cpu(env);
378
379 tlb_flush(CPU(cpu), value == 0);
d929823f
PM
380}
381
c4241c7d
PM
382static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
383 uint64_t value)
d929823f
PM
384{
385 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
31b030d4
AF
386 ARMCPU *cpu = arm_env_get_cpu(env);
387
388 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
d929823f
PM
389}
390
e9aa6c21
PM
391static const ARMCPRegInfo cp_reginfo[] = {
392 /* DBGDIDR: just RAZ. In particular this means the "debug architecture
393 * version" bits will read as a reserved value, which should cause
394 * Linux to not try to use the debug hardware.
395 */
396 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
397 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
08de207b
PM
398 { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
399 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
d4e6df63 400 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
014406b5
PM
401 { .name = "CONTEXTIDR", .state = ARM_CP_STATE_BOTH,
402 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
403 .access = PL1_RW,
404 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el1),
d4e6df63 405 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
9449fdf6
PM
406 REGINFO_SENTINEL
407};
408
409static const ARMCPRegInfo not_v8_cp_reginfo[] = {
410 /* NB: Some of these registers exist in v8 but with more precise
411 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
412 */
413 /* MMU Domain access control / MPU write buffer control */
414 { .name = "DACR", .cp = 15,
415 .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
416 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
417 .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
4fdd17dd
PM
418 /* ??? This covers not just the impdef TLB lockdown registers but also
419 * some v7VMSA registers relating to TEX remap, so it is overly broad.
420 */
421 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
422 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
d929823f
PM
423 /* MMU TLB control. Note that the wildcarding means we cover not just
424 * the unified TLB ops but also the dside/iside/inner-shareable variants.
425 */
426 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
427 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
428 .type = ARM_CP_NO_MIGRATE },
d929823f 429 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
430 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
431 .type = ARM_CP_NO_MIGRATE },
d929823f 432 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
433 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
434 .type = ARM_CP_NO_MIGRATE },
d929823f 435 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
436 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
437 .type = ARM_CP_NO_MIGRATE },
c4804214
PM
438 /* Cache maintenance ops; some of this space may be overridden later. */
439 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
440 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
441 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
e9aa6c21
PM
442 REGINFO_SENTINEL
443};
444
7d57f408
PM
445static const ARMCPRegInfo not_v6_cp_reginfo[] = {
446 /* Not all pre-v6 cores implemented this WFI, so this is slightly
447 * over-broad.
448 */
449 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
450 .access = PL1_W, .type = ARM_CP_WFI },
451 REGINFO_SENTINEL
452};
453
454static const ARMCPRegInfo not_v7_cp_reginfo[] = {
455 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
456 * is UNPREDICTABLE; we choose to NOP as most implementations do).
457 */
458 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
459 .access = PL1_W, .type = ARM_CP_WFI },
34f90529
PM
460 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
461 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
462 * OMAPCP will override this space.
463 */
464 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
465 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
466 .resetvalue = 0 },
467 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
468 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
469 .resetvalue = 0 },
776d4e5c
PM
470 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
471 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
d4e6df63
PM
472 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
473 .resetvalue = 0 },
7d57f408
PM
474 REGINFO_SENTINEL
475};
476
c4241c7d
PM
477static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
478 uint64_t value)
2771db27 479{
f0aff255
FA
480 uint32_t mask = 0;
481
482 /* In ARMv8 most bits of CPACR_EL1 are RES0. */
483 if (!arm_feature(env, ARM_FEATURE_V8)) {
484 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
485 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
486 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
487 */
488 if (arm_feature(env, ARM_FEATURE_VFP)) {
489 /* VFP coprocessor: cp10 & cp11 [23:20] */
490 mask |= (1 << 31) | (1 << 30) | (0xf << 20);
491
492 if (!arm_feature(env, ARM_FEATURE_NEON)) {
493 /* ASEDIS [31] bit is RAO/WI */
494 value |= (1 << 31);
495 }
496
497 /* VFPv3 and upwards with NEON implement 32 double precision
498 * registers (D0-D31).
499 */
500 if (!arm_feature(env, ARM_FEATURE_NEON) ||
501 !arm_feature(env, ARM_FEATURE_VFP3)) {
502 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
503 value |= (1 << 30);
504 }
505 }
506 value &= mask;
2771db27 507 }
f0aff255 508 env->cp15.c1_coproc = value;
2771db27
PM
509}
510
7d57f408
PM
511static const ARMCPRegInfo v6_cp_reginfo[] = {
512 /* prefetch by MVA in v6, NOP in v7 */
513 { .name = "MVA_prefetch",
514 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
515 .access = PL1_W, .type = ARM_CP_NOP },
516 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
517 .access = PL0_W, .type = ARM_CP_NOP },
091fd17c 518 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
7d57f408 519 .access = PL0_W, .type = ARM_CP_NOP },
091fd17c 520 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
7d57f408 521 .access = PL0_W, .type = ARM_CP_NOP },
06d76f31 522 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
6cd8a264
RH
523 .access = PL1_RW,
524 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el1),
06d76f31
PM
525 .resetvalue = 0, },
526 /* Watchpoint Fault Address Register : should actually only be present
527 * for 1136, 1176, 11MPCore.
528 */
529 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
530 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
34222fb8
PM
531 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
532 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
2771db27
PM
533 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
534 .resetvalue = 0, .writefn = cpacr_write },
7d57f408
PM
535 REGINFO_SENTINEL
536};
537
fcd25206 538static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
200ac0ef 539{
3b163b01 540 /* Performance monitor registers user accessibility is controlled
fcd25206 541 * by PMUSERENR.
200ac0ef
PM
542 */
543 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
fcd25206 544 return CP_ACCESS_TRAP;
200ac0ef 545 }
fcd25206 546 return CP_ACCESS_OK;
200ac0ef
PM
547}
548
7c2cb42b 549#ifndef CONFIG_USER_ONLY
c4241c7d
PM
550static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
551 uint64_t value)
200ac0ef 552{
7c2cb42b
AF
553 /* Don't computer the number of ticks in user mode */
554 uint32_t temp_ticks;
555
556 temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
557 get_ticks_per_sec() / 1000000;
558
559 if (env->cp15.c9_pmcr & PMCRE) {
560 /* If the counter is enabled */
561 if (env->cp15.c9_pmcr & PMCRD) {
562 /* Increment once every 64 processor clock cycles */
563 env->cp15.c15_ccnt = (temp_ticks/64) - env->cp15.c15_ccnt;
564 } else {
565 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
566 }
567 }
568
569 if (value & PMCRC) {
570 /* The counter has been reset */
571 env->cp15.c15_ccnt = 0;
572 }
573
200ac0ef
PM
574 /* only the DP, X, D and E bits are writable */
575 env->cp15.c9_pmcr &= ~0x39;
576 env->cp15.c9_pmcr |= (value & 0x39);
7c2cb42b
AF
577
578 if (env->cp15.c9_pmcr & PMCRE) {
579 if (env->cp15.c9_pmcr & PMCRD) {
580 /* Increment once every 64 processor clock cycles */
581 temp_ticks /= 64;
582 }
583 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
584 }
585}
586
587static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
588{
589 uint32_t total_ticks;
590
591 if (!(env->cp15.c9_pmcr & PMCRE)) {
592 /* Counter is disabled, do not change value */
593 return env->cp15.c15_ccnt;
594 }
595
596 total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
597 get_ticks_per_sec() / 1000000;
598
599 if (env->cp15.c9_pmcr & PMCRD) {
600 /* Increment once every 64 processor clock cycles */
601 total_ticks /= 64;
602 }
603 return total_ticks - env->cp15.c15_ccnt;
604}
605
606static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
607 uint64_t value)
608{
609 uint32_t total_ticks;
610
611 if (!(env->cp15.c9_pmcr & PMCRE)) {
612 /* Counter is disabled, set the absolute value */
613 env->cp15.c15_ccnt = value;
614 return;
615 }
616
617 total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
618 get_ticks_per_sec() / 1000000;
619
620 if (env->cp15.c9_pmcr & PMCRD) {
621 /* Increment once every 64 processor clock cycles */
622 total_ticks /= 64;
623 }
624 env->cp15.c15_ccnt = total_ticks - value;
200ac0ef 625}
7c2cb42b 626#endif
200ac0ef 627
c4241c7d 628static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
200ac0ef
PM
629 uint64_t value)
630{
200ac0ef
PM
631 value &= (1 << 31);
632 env->cp15.c9_pmcnten |= value;
200ac0ef
PM
633}
634
c4241c7d
PM
635static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
636 uint64_t value)
200ac0ef 637{
200ac0ef
PM
638 value &= (1 << 31);
639 env->cp15.c9_pmcnten &= ~value;
200ac0ef
PM
640}
641
c4241c7d
PM
642static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
643 uint64_t value)
200ac0ef 644{
200ac0ef 645 env->cp15.c9_pmovsr &= ~value;
200ac0ef
PM
646}
647
c4241c7d
PM
648static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
649 uint64_t value)
200ac0ef 650{
200ac0ef 651 env->cp15.c9_pmxevtyper = value & 0xff;
200ac0ef
PM
652}
653
c4241c7d 654static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
200ac0ef
PM
655 uint64_t value)
656{
657 env->cp15.c9_pmuserenr = value & 1;
200ac0ef
PM
658}
659
c4241c7d
PM
660static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
661 uint64_t value)
200ac0ef
PM
662{
663 /* We have no event counters so only the C bit can be changed */
664 value &= (1 << 31);
665 env->cp15.c9_pminten |= value;
200ac0ef
PM
666}
667
c4241c7d
PM
668static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
669 uint64_t value)
200ac0ef
PM
670{
671 value &= (1 << 31);
672 env->cp15.c9_pminten &= ~value;
200ac0ef
PM
673}
674
c4241c7d
PM
675static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
676 uint64_t value)
8641136c 677{
a505d7fe
PM
678 /* Note that even though the AArch64 view of this register has bits
679 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
680 * architectural requirements for bits which are RES0 only in some
681 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
682 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
683 */
68fdb6c5 684 env->cp15.vbar_el[1] = value & ~0x1FULL;
8641136c
NR
685}
686
c4241c7d 687static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
776d4e5c
PM
688{
689 ARMCPU *cpu = arm_env_get_cpu(env);
c4241c7d 690 return cpu->ccsidr[env->cp15.c0_cssel];
776d4e5c
PM
691}
692
c4241c7d
PM
693static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
694 uint64_t value)
776d4e5c
PM
695{
696 env->cp15.c0_cssel = value & 0xf;
776d4e5c
PM
697}
698
1090b9c6
PM
699static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
700{
701 CPUState *cs = ENV_GET_CPU(env);
702 uint64_t ret = 0;
703
704 if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
705 ret |= CPSR_I;
706 }
707 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
708 ret |= CPSR_F;
709 }
710 /* External aborts are not possible in QEMU so A bit is always clear */
711 return ret;
712}
713
e9aa6c21
PM
714static const ARMCPRegInfo v7_cp_reginfo[] = {
715 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
716 * debug components
717 */
718 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
719 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
091fd17c 720 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
e9aa6c21 721 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7d57f408
PM
722 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
723 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
724 .access = PL1_W, .type = ARM_CP_NOP },
200ac0ef
PM
725 /* Performance monitors are implementation defined in v7,
726 * but with an ARM recommended set of registers, which we
727 * follow (although we don't actually implement any counters)
728 *
729 * Performance registers fall into three categories:
730 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
731 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
732 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
733 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
734 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
735 */
736 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
737 .access = PL0_RW, .resetvalue = 0,
738 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
fcd25206
PM
739 .writefn = pmcntenset_write,
740 .accessfn = pmreg_access,
741 .raw_writefn = raw_write },
200ac0ef
PM
742 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
743 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
fcd25206
PM
744 .accessfn = pmreg_access,
745 .writefn = pmcntenclr_write,
d4e6df63 746 .type = ARM_CP_NO_MIGRATE },
200ac0ef
PM
747 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
748 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
fcd25206
PM
749 .accessfn = pmreg_access,
750 .writefn = pmovsr_write,
751 .raw_writefn = raw_write },
752 /* Unimplemented so WI. */
200ac0ef 753 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
fcd25206 754 .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
200ac0ef 755 /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
fcd25206 756 * We choose to RAZ/WI.
200ac0ef
PM
757 */
758 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
fcd25206
PM
759 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
760 .accessfn = pmreg_access },
7c2cb42b 761#ifndef CONFIG_USER_ONLY
200ac0ef 762 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
7c2cb42b
AF
763 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
764 .readfn = pmccntr_read, .writefn = pmccntr_write,
fcd25206 765 .accessfn = pmreg_access },
7c2cb42b 766#endif
200ac0ef
PM
767 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
768 .access = PL0_RW,
769 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
fcd25206
PM
770 .accessfn = pmreg_access, .writefn = pmxevtyper_write,
771 .raw_writefn = raw_write },
772 /* Unimplemented, RAZ/WI. */
200ac0ef 773 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
fcd25206
PM
774 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
775 .accessfn = pmreg_access },
200ac0ef
PM
776 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
777 .access = PL0_R | PL1_RW,
778 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
779 .resetvalue = 0,
d4e6df63 780 .writefn = pmuserenr_write, .raw_writefn = raw_write },
200ac0ef
PM
781 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
782 .access = PL1_RW,
783 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
784 .resetvalue = 0,
d4e6df63 785 .writefn = pmintenset_write, .raw_writefn = raw_write },
200ac0ef 786 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
d4e6df63 787 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
200ac0ef 788 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
d4e6df63 789 .resetvalue = 0, .writefn = pmintenclr_write, },
a505d7fe
PM
790 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
791 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8641136c 792 .access = PL1_RW, .writefn = vbar_write,
68fdb6c5 793 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
8641136c 794 .resetvalue = 0 },
2771db27
PM
795 { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
796 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
797 .resetvalue = 0, },
7da845b0
PM
798 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
799 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
d4e6df63 800 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
7da845b0
PM
801 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
802 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
776d4e5c
PM
803 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
804 .writefn = csselr_write, .resetvalue = 0 },
805 /* Auxiliary ID register: this actually has an IMPDEF value but for now
806 * just RAZ for all cores:
807 */
0ff644a7
PM
808 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
809 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
776d4e5c 810 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
f32cdad5
PM
811 /* Auxiliary fault status registers: these also are IMPDEF, and we
812 * choose to RAZ/WI for all cores.
813 */
814 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
815 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
816 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
817 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
818 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
819 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
b0fe2427
PM
820 /* MAIR can just read-as-written because we don't implement caches
821 * and so don't need to care about memory attributes.
822 */
823 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
824 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
825 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
826 .resetvalue = 0 },
827 /* For non-long-descriptor page tables these are PRRR and NMRR;
828 * regardless they still act as reads-as-written for QEMU.
829 * The override is necessary because of the overly-broad TLB_LOCKDOWN
830 * definition.
831 */
832 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
833 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
834 .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
835 .resetfn = arm_cp_reset_ignore },
836 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
837 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
838 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
839 .resetfn = arm_cp_reset_ignore },
1090b9c6
PM
840 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
841 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
842 .type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
e9aa6c21
PM
843 REGINFO_SENTINEL
844};
845
c4241c7d
PM
846static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
847 uint64_t value)
c326b979
PM
848{
849 value &= 1;
850 env->teecr = value;
c326b979
PM
851}
852
c4241c7d 853static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
c326b979 854{
c326b979 855 if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
92611c00 856 return CP_ACCESS_TRAP;
c326b979 857 }
92611c00 858 return CP_ACCESS_OK;
c326b979
PM
859}
860
/* ThumbEE registers: TEECR (PL1 config) and TEEHBR (handler base,
 * PL0-accessible but trappable via TEECR.XED — see teehbr_access()).
 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
871
/* v6K thread-ID registers. The AArch32 TPIDRURW/TPIDRURO map onto the
 * low 32 bits of the AArch64 TPIDR_EL0/TPIDRRO_EL0 state, so only the
 * 64-bit views carry reset/migration state (the 32-bit ones use
 * arm_cp_reset_ignore).
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
    REGINFO_SENTINEL
};
895
55d284af
PM
896#ifndef CONFIG_USER_ONLY
897
00108f2d
PM
898static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
899{
900 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
901 if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
902 return CP_ACCESS_TRAP;
903 }
904 return CP_ACCESS_OK;
905}
906
907static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
908{
909 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
910 if (arm_current_pl(env) == 0 &&
911 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
912 return CP_ACCESS_TRAP;
913 }
914 return CP_ACCESS_OK;
915}
916
917static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
918{
919 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
920 * EL0[PV]TEN is zero.
921 */
922 if (arm_current_pl(env) == 0 &&
923 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
924 return CP_ACCESS_TRAP;
925 }
926 return CP_ACCESS_OK;
927}
928
929static CPAccessResult gt_pct_access(CPUARMState *env,
930 const ARMCPRegInfo *ri)
931{
932 return gt_counter_access(env, GTIMER_PHYS);
933}
934
935static CPAccessResult gt_vct_access(CPUARMState *env,
936 const ARMCPRegInfo *ri)
937{
938 return gt_counter_access(env, GTIMER_VIRT);
939}
940
941static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
942{
943 return gt_timer_access(env, GTIMER_PHYS);
944}
945
946static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
947{
948 return gt_timer_access(env, GTIMER_VIRT);
949}
950
55d284af
PM
951static uint64_t gt_get_countervalue(CPUARMState *env)
952{
bc72ad67 953 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
55d284af
PM
954}
955
956static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
957{
958 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
959
960 if (gt->ctl & 1) {
961 /* Timer enabled: calculate and set current ISTATUS, irq, and
962 * reset timer to when ISTATUS next has to change
963 */
964 uint64_t count = gt_get_countervalue(&cpu->env);
965 /* Note that this must be unsigned 64 bit arithmetic: */
966 int istatus = count >= gt->cval;
967 uint64_t nexttick;
968
969 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
970 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
971 (istatus && !(gt->ctl & 2)));
972 if (istatus) {
973 /* Next transition is when count rolls back over to zero */
974 nexttick = UINT64_MAX;
975 } else {
976 /* Next transition is when we hit cval */
977 nexttick = gt->cval;
978 }
979 /* Note that the desired next expiry time might be beyond the
980 * signed-64-bit range of a QEMUTimer -- in this case we just
981 * set the timer for as far in the future as possible. When the
982 * timer expires we will reset the timer for any remaining period.
983 */
984 if (nexttick > INT64_MAX / GTIMER_SCALE) {
985 nexttick = INT64_MAX / GTIMER_SCALE;
986 }
bc72ad67 987 timer_mod(cpu->gt_timer[timeridx], nexttick);
55d284af
PM
988 } else {
989 /* Timer disabled: ISTATUS and timer output always clear */
990 gt->ctl &= ~4;
991 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
bc72ad67 992 timer_del(cpu->gt_timer[timeridx]);
55d284af
PM
993 }
994}
995
55d284af
PM
/* resetfn for the AArch64 counter views (CNTPCT_EL0/CNTVCT_EL0):
 * cancel the pending QEMUTimer on CPU reset.
 */
static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    /* NOTE(review): both CNTPCT_EL0 and CNTVCT_EL0 are registered with
     * opc1 == 3, so this always resolves to the same (virtual) timer
     * index for the AArch64 registers using this resetfn -- confirm
     * whether the physical timer should also be deleted here.
     */
    int timeridx = ri->opc1 & 1;

    timer_del(cpu->gt_timer[timeridx]);
}
1003
c4241c7d 1004static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
55d284af 1005{
c4241c7d 1006 return gt_get_countervalue(env);
55d284af
PM
1007}
1008
c4241c7d
PM
1009static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1010 uint64_t value)
55d284af
PM
1011{
1012 int timeridx = ri->opc1 & 1;
1013
1014 env->cp15.c14_timer[timeridx].cval = value;
1015 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
55d284af 1016}
c4241c7d
PM
1017
1018static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
55d284af
PM
1019{
1020 int timeridx = ri->crm & 1;
1021
c4241c7d
PM
1022 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1023 gt_get_countervalue(env));
55d284af
PM
1024}
1025
c4241c7d
PM
1026static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1027 uint64_t value)
55d284af
PM
1028{
1029 int timeridx = ri->crm & 1;
1030
1031 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
1032 + sextract64(value, 0, 32);
1033 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
55d284af
PM
1034}
1035
c4241c7d
PM
1036static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1037 uint64_t value)
55d284af
PM
1038{
1039 ARMCPU *cpu = arm_env_get_cpu(env);
1040 int timeridx = ri->crm & 1;
1041 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1042
1043 env->cp15.c14_timer[timeridx].ctl = value & 3;
1044 if ((oldval ^ value) & 1) {
1045 /* Enable toggled */
1046 gt_recalc_timer(cpu, timeridx);
1047 } else if ((oldval & value) & 2) {
1048 /* IMASK toggled: don't need to recalculate,
1049 * just set the interrupt line based on ISTATUS
1050 */
1051 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1052 (oldval & 4) && (value & 2));
1053 }
55d284af
PM
1054}
1055
1056void arm_gt_ptimer_cb(void *opaque)
1057{
1058 ARMCPU *cpu = opaque;
1059
1060 gt_recalc_timer(cpu, GTIMER_PHYS);
1061}
1062
1063void arm_gt_vtimer_cb(void *opaque)
1064{
1065 ARMCPU *cpu = opaque;
1066
1067 gt_recalc_timer(cpu, GTIMER_VIRT);
1068}
1069
1070static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1071 /* Note that CNTFRQ is purely reads-as-written for the benefit
1072 * of software; writing it doesn't actually change the timer frequency.
1073 * Our reset value matches the fixed frequency we implement the timer at.
1074 */
1075 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
a7adc4b7
PM
1076 .type = ARM_CP_NO_MIGRATE,
1077 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1078 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1079 .resetfn = arm_cp_reset_ignore,
1080 },
1081 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1082 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1083 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
55d284af
PM
1084 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1085 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
55d284af
PM
1086 },
1087 /* overall control: mostly access permissions */
a7adc4b7
PM
1088 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1089 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
55d284af
PM
1090 .access = PL1_RW,
1091 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1092 .resetvalue = 0,
1093 },
1094 /* per-timer control */
1095 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
a7adc4b7
PM
1096 .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
1097 .accessfn = gt_ptimer_access,
1098 .fieldoffset = offsetoflow32(CPUARMState,
1099 cp15.c14_timer[GTIMER_PHYS].ctl),
1100 .resetfn = arm_cp_reset_ignore,
1101 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1102 },
1103 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1104 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
55d284af 1105 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
a7adc4b7 1106 .accessfn = gt_ptimer_access,
55d284af
PM
1107 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1108 .resetvalue = 0,
00108f2d 1109 .writefn = gt_ctl_write, .raw_writefn = raw_write,
55d284af
PM
1110 },
1111 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
a7adc4b7
PM
1112 .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
1113 .accessfn = gt_vtimer_access,
1114 .fieldoffset = offsetoflow32(CPUARMState,
1115 cp15.c14_timer[GTIMER_VIRT].ctl),
1116 .resetfn = arm_cp_reset_ignore,
1117 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1118 },
1119 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1120 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
55d284af 1121 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
a7adc4b7 1122 .accessfn = gt_vtimer_access,
55d284af
PM
1123 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1124 .resetvalue = 0,
00108f2d 1125 .writefn = gt_ctl_write, .raw_writefn = raw_write,
55d284af
PM
1126 },
1127 /* TimerValue views: a 32 bit downcounting view of the underlying state */
1128 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1129 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
00108f2d 1130 .accessfn = gt_ptimer_access,
55d284af
PM
1131 .readfn = gt_tval_read, .writefn = gt_tval_write,
1132 },
a7adc4b7
PM
1133 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1134 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1135 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
1136 .readfn = gt_tval_read, .writefn = gt_tval_write,
1137 },
55d284af
PM
1138 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1139 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
00108f2d 1140 .accessfn = gt_vtimer_access,
55d284af
PM
1141 .readfn = gt_tval_read, .writefn = gt_tval_write,
1142 },
a7adc4b7
PM
1143 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1144 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1145 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
1146 .readfn = gt_tval_read, .writefn = gt_tval_write,
1147 },
55d284af
PM
1148 /* The counter itself */
1149 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1150 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
00108f2d 1151 .accessfn = gt_pct_access,
a7adc4b7
PM
1152 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1153 },
1154 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1155 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1156 .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
1157 .accessfn = gt_pct_access,
55d284af
PM
1158 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
1159 },
1160 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1161 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
00108f2d 1162 .accessfn = gt_vct_access,
a7adc4b7
PM
1163 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1164 },
1165 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1166 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1167 .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
1168 .accessfn = gt_vct_access,
55d284af
PM
1169 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
1170 },
1171 /* Comparison value, indicating when the timer goes off */
1172 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1173 .access = PL1_RW | PL0_R,
a7adc4b7 1174 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
55d284af 1175 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
a7adc4b7
PM
1176 .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
1177 .writefn = gt_cval_write, .raw_writefn = raw_write,
1178 },
1179 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1180 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1181 .access = PL1_RW | PL0_R,
1182 .type = ARM_CP_IO,
1183 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1184 .resetvalue = 0, .accessfn = gt_vtimer_access,
00108f2d 1185 .writefn = gt_cval_write, .raw_writefn = raw_write,
55d284af
PM
1186 },
1187 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1188 .access = PL1_RW | PL0_R,
a7adc4b7 1189 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
55d284af 1190 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
a7adc4b7
PM
1191 .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
1192 .writefn = gt_cval_write, .raw_writefn = raw_write,
1193 },
1194 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1195 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1196 .access = PL1_RW | PL0_R,
1197 .type = ARM_CP_IO,
1198 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1199 .resetvalue = 0, .accessfn = gt_vtimer_access,
00108f2d 1200 .writefn = gt_cval_write, .raw_writefn = raw_write,
55d284af
PM
1201 },
1202 REGINFO_SENTINEL
1203};
1204
#else
/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Empty except for the sentinel -- presumably the table itself is
     * still referenced unconditionally by the registration code, so it
     * must exist even when it defines nothing.
     */
    REGINFO_SENTINEL
};

#endif
1215
c4241c7d 1216static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
4a501606 1217{
891a2fe7 1218 if (arm_feature(env, ARM_FEATURE_LPAE)) {
19525524 1219 env->cp15.par_el1 = value;
891a2fe7 1220 } else if (arm_feature(env, ARM_FEATURE_V7)) {
19525524 1221 env->cp15.par_el1 = value & 0xfffff6ff;
4a501606 1222 } else {
19525524 1223 env->cp15.par_el1 = value & 0xfffff1ff;
4a501606 1224 }
4a501606
PM
1225}
1226
1227#ifndef CONFIG_USER_ONLY
1228/* get_phys_addr() isn't present for user-mode-only targets */
702a9357 1229
92611c00
PM
1230static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
1231{
1232 if (ri->opc2 & 4) {
1233 /* Other states are only available with TrustZone; in
1234 * a non-TZ implementation these registers don't exist
1235 * at all, which is an Uncategorized trap. This underdecoding
1236 * is safe because the reginfo is NO_MIGRATE.
1237 */
1238 return CP_ACCESS_TRAP_UNCATEGORIZED;
1239 }
1240 return CP_ACCESS_OK;
1241}
1242
/* Perform an ATS (address translation) operation: translate the virtual
 * address in @value via get_phys_addr() and deposit the result (or the
 * fault status) into PAR, in either long- or short-descriptor format.
 * opc2 bit 1 selects the unprivileged variant; bit 0 is the access type
 * passed to get_phys_addr() (presumably read vs write -- confirm against
 * the ATS1C{P,U}{R,W} encodings).
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.par_el1 = par64;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersections report only the top 8 address bits */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.par_el1 = phys_addr & 0xfffff000;
            }
        } else {
            /* Repack the DFSR fault-status bits into 32-bit PAR layout,
             * with bit 0 (F) set to indicate a fault.
             */
            env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) |
                ((ret & (1 << 12)) >> 6) |
                ((ret & 0xf) << 1) | 1;
        }
    }
}
1291#endif
1292
/* VA-to-PA translation support: the PAR result register and the ATS
 * translation operations (the latter need get_phys_addr() and so are
 * softmmu-only).
 */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.par_el1),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};
1305
18032bec
PM
/* Return basic MPU access permission bits: compress the extended
 * format (one 4-bit field per region) back to the simple 2-bit-per-
 * region layout by keeping the low two bits of each field.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (4 * region)) & 3) << (2 * region);
    }
    return ret;
}
1320
/* Pad basic MPU access permission bits to extended format: spread each
 * 2-bit region field into the low two bits of its own 4-bit field.
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (2 * region)) & 3) << (4 * region);
    }
    return ret;
}
1335
c4241c7d
PM
1336static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1337 uint64_t value)
18032bec 1338{
7e09797c 1339 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
18032bec
PM
1340}
1341
c4241c7d 1342static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18032bec 1343{
7e09797c 1344 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
18032bec
PM
1345}
1346
c4241c7d
PM
1347static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1348 uint64_t value)
18032bec 1349{
7e09797c 1350 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
18032bec
PM
1351}
1352
c4241c7d 1353static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
18032bec 1354{
7e09797c 1355 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
18032bec
PM
1356}
1357
/* PMSAv5 MPU registers: access-permission views, cache configuration
 * and the eight ARM946-style protection region base/size registers.
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    /* DATA_AP/INSN_AP are the legacy 2-bit-per-region views; they
     * convert to/from the extended backing store on access, so they are
     * NO_MIGRATE (the *_EXT_AP entries below carry the real state).
     */
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
1410
c4241c7d
PM
1411static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
1412 uint64_t value)
ecce5c3c 1413{
2ebcebe2
PM
1414 int maskshift = extract32(value, 0, 3);
1415
74f1c6dd 1416 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
e42c4db3 1417 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
e42c4db3
PM
1418 } else {
1419 value &= 7;
1420 }
1421 /* Note that we always calculate c2_mask and c2_base_mask, but
1422 * they are only used for short-descriptor tables (ie if EAE is 0);
1423 * for long-descriptor tables the TTBCR fields are used differently
1424 * and the c2_mask and c2_base_mask values are meaningless.
1425 */
ecce5c3c 1426 env->cp15.c2_control = value;
2ebcebe2
PM
1427 env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
1428 env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
ecce5c3c
PM
1429}
1430
c4241c7d
PM
1431static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1432 uint64_t value)
d4e6df63 1433{
00c8cb0a
AF
1434 ARMCPU *cpu = arm_env_get_cpu(env);
1435
d4e6df63
PM
1436 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1437 /* With LPAE the TTBCR could result in a change of ASID
1438 * via the TTBCR.A1 bit, so do a TLB flush.
1439 */
00c8cb0a 1440 tlb_flush(CPU(cpu), 1);
d4e6df63 1441 }
c4241c7d 1442 vmsa_ttbcr_raw_write(env, ri, value);
d4e6df63
PM
1443}
1444
ecce5c3c
PM
1445static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1446{
1447 env->cp15.c2_base_mask = 0xffffc000u;
1448 env->cp15.c2_control = 0;
1449 env->cp15.c2_mask = 0;
1450}
1451
cb2e37df
PM
1452static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1453 uint64_t value)
1454{
00c8cb0a
AF
1455 ARMCPU *cpu = arm_env_get_cpu(env);
1456
cb2e37df 1457 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
00c8cb0a 1458 tlb_flush(CPU(cpu), 1);
cb2e37df
PM
1459 env->cp15.c2_control = value;
1460}
1461
327ed10f
PM
1462static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1463 uint64_t value)
1464{
1465 /* 64 bit accesses to the TTBRs can change the ASID and so we
1466 * must flush the TLB.
1467 */
1468 if (cpreg_field_is_64bit(ri)) {
00c8cb0a
AF
1469 ARMCPU *cpu = arm_env_get_cpu(env);
1470
1471 tlb_flush(CPU(cpu), 1);
327ed10f
PM
1472 }
1473 raw_write(env, ri, value);
1474}
1475
18032bec
PM
/* VMSA (MMU-based) registers: fault status/address, translation table
 * base and control registers, for both AArch32 and AArch64 views.
 */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    /* AArch32 DFSR is the low half of ESR_EL1; the 64-bit ESR_EL1
     * entry below owns the state, so this view is NO_MIGRATE.
     */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetfn = arm_cp_reset_ignore, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr_el2), .resetvalue = 0, },
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    /* 32-bit TTBCR view of the TCR_EL1 state above: its writefn also
     * maintains the derived c2_mask/c2_base_mask values.
     */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
    /* 64-bit FAR; this entry also gives us the AArch32 DFAR */
    { .name = "FAR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el1),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
1512
c4241c7d
PM
1513static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
1514 uint64_t value)
1047b9d7
PM
1515{
1516 env->cp15.c15_ticonfig = value & 0xe7;
1517 /* The OS_TYPE bit in this register changes the reported CPUID! */
1518 env->cp15.c0_cpuid = (value & (1 << 5)) ?
1519 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1047b9d7
PM
1520}
1521
c4241c7d
PM
1522static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1523 uint64_t value)
1047b9d7
PM
1524{
1525 env->cp15.c15_threadid = value & 0xffff;
1047b9d7
PM
1526}
1527
c4241c7d
PM
1528static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
1529 uint64_t value)
1047b9d7
PM
1530{
1531 /* Wait-for-interrupt (deprecated) */
c3affe56 1532 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
1047b9d7
PM
1533}
1534
c4241c7d
PM
1535static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
1536 uint64_t value)
c4804214
PM
1537{
1538 /* On OMAP there are registers indicating the max/min index of dcache lines
1539 * containing a dirty line; cache flush operations have to reset these.
1540 */
1541 env->cp15.c15_i_max = 0x000;
1542 env->cp15.c15_i_min = 0xff0;
c4804214
PM
1543}
1544
18032bec
PM
/* OMAP/TI925T implementation-defined cp15 registers. */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Reads as zero; a write is the deprecated wait-for-interrupt op */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1584
c4241c7d
PM
1585static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1586 uint64_t value)
1047b9d7
PM
1587{
1588 value &= 0x3fff;
1589 if (env->cp15.c15_cpar != value) {
1590 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
1591 tb_flush(env);
1592 env->cp15.c15_cpar = value;
1593 }
1047b9d7
PM
1594}
1595
/* XScale implementation-defined registers. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
1622
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1636
c4804214
PM
1637static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
1638 /* Cache status: RAZ because we have no cache so it's always clean */
1639 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
d4e6df63
PM
1640 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1641 .resetvalue = 0 },
c4804214
PM
1642 REGINFO_SENTINEL
1643};
1644
1645static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
1646 /* We never have a a block transfer operation in progress */
1647 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
d4e6df63
PM
1648 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1649 .resetvalue = 0 },
30b05bba
PM
1650 /* The cache ops themselves: these all NOP for QEMU */
1651 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
1652 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1653 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
1654 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1655 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
1656 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1657 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
1658 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1659 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
1660 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1661 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
1662 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
c4804214
PM
1663 REGINFO_SENTINEL
1664};
1665
1666static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
1667 /* The cache test-and-clean instructions always return (1 << 30)
1668 * to indicate that there are no dirty cache lines.
1669 */
1670 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
d4e6df63
PM
1671 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1672 .resetvalue = (1 << 30) },
c4804214 1673 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
d4e6df63
PM
1674 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1675 .resetvalue = (1 << 30) },
c4804214
PM
1676 REGINFO_SENTINEL
1677};
1678
34f90529
PM
1679static const ARMCPRegInfo strongarm_cp_reginfo[] = {
1680 /* Ignore ReadBuffer accesses */
1681 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
1682 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
d4e6df63
PM
1683 .access = PL1_RW, .resetvalue = 0,
1684 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
34f90529
PM
1685 REGINFO_SENTINEL
1686};
1687
c4241c7d 1688static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
81bdde9d 1689{
55e5c285
AF
1690 CPUState *cs = CPU(arm_env_get_cpu(env));
1691 uint32_t mpidr = cs->cpu_index;
4b7fff2f
PM
1692 /* We don't support setting cluster ID ([8..11]) (known as Aff1
1693 * in later ARM ARM versions), or any of the higher affinity level fields,
81bdde9d
PM
1694 * so these bits always RAZ.
1695 */
1696 if (arm_feature(env, ARM_FEATURE_V7MP)) {
78dbbbe4 1697 mpidr |= (1U << 31);
81bdde9d
PM
1698 /* Cores which are uniprocessor (non-coherent)
1699 * but still implement the MP extensions set
1700 * bit 30. (For instance, A9UP.) However we do
1701 * not currently model any of those cores.
1702 */
1703 }
c4241c7d 1704 return mpidr;
81bdde9d
PM
1705}
1706
1707static const ARMCPRegInfo mpidr_cp_reginfo[] = {
4b7fff2f
PM
1708 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
1709 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
d4e6df63 1710 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
81bdde9d
PM
1711 REGINFO_SENTINEL
1712};
1713
7ac681cf 1714static const ARMCPRegInfo lpae_cp_reginfo[] = {
b90372ad 1715 /* NOP AMAIR0/1: the override is because these clash with the rather
7ac681cf
PM
1716 * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
1717 */
b0fe2427
PM
1718 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
1719 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
7ac681cf
PM
1720 .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
1721 .resetvalue = 0 },
b0fe2427 1722 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
7ac681cf
PM
1723 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
1724 .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
1725 .resetvalue = 0 },
f9fc619a
PM
1726 /* 64 bit access versions of the (dummy) debug registers */
1727 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
1728 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
1729 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
1730 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
891a2fe7
PM
1731 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
1732 .access = PL1_RW, .type = ARM_CP_64BIT,
19525524 1733 .fieldoffset = offsetof(CPUARMState, cp15.par_el1), .resetvalue = 0 },
891a2fe7 1734 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
327ed10f
PM
1735 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
1736 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
1737 .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
891a2fe7 1738 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
327ed10f
PM
1739 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
1740 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
1741 .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
7ac681cf
PM
1742 REGINFO_SENTINEL
1743};
1744
c4241c7d 1745static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
b0d2b7d0 1746{
c4241c7d 1747 return vfp_get_fpcr(env);
b0d2b7d0
PM
1748}
1749
c4241c7d
PM
1750static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751 uint64_t value)
b0d2b7d0
PM
1752{
1753 vfp_set_fpcr(env, value);
b0d2b7d0
PM
1754}
1755
c4241c7d 1756static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
b0d2b7d0 1757{
c4241c7d 1758 return vfp_get_fpsr(env);
b0d2b7d0
PM
1759}
1760
c4241c7d
PM
1761static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1762 uint64_t value)
b0d2b7d0
PM
1763{
1764 vfp_set_fpsr(env, value);
b0d2b7d0
PM
1765}
1766
c2b820fe
PM
1767static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
1768{
1769 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
1770 return CP_ACCESS_TRAP;
1771 }
1772 return CP_ACCESS_OK;
1773}
1774
1775static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
1776 uint64_t value)
1777{
1778 env->daif = value & PSTATE_DAIF;
1779}
1780
8af35c37
PM
1781static CPAccessResult aa64_cacheop_access(CPUARMState *env,
1782 const ARMCPRegInfo *ri)
1783{
1784 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
1785 * SCTLR_EL1.UCI is set.
1786 */
1787 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
1788 return CP_ACCESS_TRAP;
1789 }
1790 return CP_ACCESS_OK;
1791}
1792
168aa23b
PM
1793static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
1794 uint64_t value)
1795{
1796 /* Invalidate by VA (AArch64 version) */
31b030d4 1797 ARMCPU *cpu = arm_env_get_cpu(env);
168aa23b 1798 uint64_t pageaddr = value << 12;
31b030d4 1799 tlb_flush_page(CPU(cpu), pageaddr);
168aa23b
PM
1800}
1801
1802static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
1803 uint64_t value)
1804{
1805 /* Invalidate by VA, all ASIDs (AArch64 version) */
31b030d4 1806 ARMCPU *cpu = arm_env_get_cpu(env);
168aa23b 1807 uint64_t pageaddr = value << 12;
31b030d4 1808 tlb_flush_page(CPU(cpu), pageaddr);
168aa23b
PM
1809}
1810
1811static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1812 uint64_t value)
1813{
1814 /* Invalidate by ASID (AArch64 version) */
00c8cb0a 1815 ARMCPU *cpu = arm_env_get_cpu(env);
168aa23b 1816 int asid = extract64(value, 48, 16);
00c8cb0a 1817 tlb_flush(CPU(cpu), asid == 0);
168aa23b
PM
1818}
1819
aca3f40b
PM
1820static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
1821{
1822 /* We don't implement EL2, so the only control on DC ZVA is the
1823 * bit in the SCTLR which can prohibit access for EL0.
1824 */
1825 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
1826 return CP_ACCESS_TRAP;
1827 }
1828 return CP_ACCESS_OK;
1829}
1830
1831static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
1832{
1833 ARMCPU *cpu = arm_env_get_cpu(env);
1834 int dzp_bit = 1 << 4;
1835
1836 /* DZP indicates whether DC ZVA access is allowed */
1837 if (aa64_zva_access(env, NULL) != CP_ACCESS_OK) {
1838 dzp_bit = 0;
1839 }
1840 return cpu->dcz_blocksize | dzp_bit;
1841}
1842
f502cfc2
PM
1843static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
1844{
1845 if (!env->pstate & PSTATE_SP) {
1846 /* Access to SP_EL0 is undefined if it's being used as
1847 * the stack pointer.
1848 */
1849 return CP_ACCESS_TRAP_UNCATEGORIZED;
1850 }
1851 return CP_ACCESS_OK;
1852}
1853
1854static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
1855{
1856 return env->pstate & PSTATE_SP;
1857}
1858
1859static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
1860{
1861 update_spsel(env, val);
1862}
1863
b0d2b7d0
PM
1864static const ARMCPRegInfo v8_cp_reginfo[] = {
1865 /* Minimal set of EL0-visible registers. This will need to be expanded
1866 * significantly for system emulation of AArch64 CPUs.
1867 */
1868 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
1869 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
1870 .access = PL0_RW, .type = ARM_CP_NZCV },
c2b820fe
PM
1871 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
1872 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
1873 .type = ARM_CP_NO_MIGRATE,
1874 .access = PL0_RW, .accessfn = aa64_daif_access,
1875 .fieldoffset = offsetof(CPUARMState, daif),
1876 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
b0d2b7d0
PM
1877 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
1878 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
1879 .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
1880 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
1881 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
1882 .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
b0d2b7d0
PM
1883 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
1884 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
aca3f40b
PM
1885 .access = PL0_R, .type = ARM_CP_NO_MIGRATE,
1886 .readfn = aa64_dczid_read },
1887 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
1888 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
1889 .access = PL0_W, .type = ARM_CP_DC_ZVA,
1890#ifndef CONFIG_USER_ONLY
1891 /* Avoid overhead of an access check that always passes in user-mode */
1892 .accessfn = aa64_zva_access,
1893#endif
1894 },
0eef9d98
PM
1895 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
1896 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
1897 .access = PL1_R, .type = ARM_CP_CURRENTEL },
8af35c37
PM
1898 /* Cache ops: all NOPs since we don't emulate caches */
1899 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
1900 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
1901 .access = PL1_W, .type = ARM_CP_NOP },
1902 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
1903 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
1904 .access = PL1_W, .type = ARM_CP_NOP },
1905 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
1906 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
1907 .access = PL0_W, .type = ARM_CP_NOP,
1908 .accessfn = aa64_cacheop_access },
1909 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
1910 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
1911 .access = PL1_W, .type = ARM_CP_NOP },
1912 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
1913 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
1914 .access = PL1_W, .type = ARM_CP_NOP },
1915 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
1916 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
1917 .access = PL0_W, .type = ARM_CP_NOP,
1918 .accessfn = aa64_cacheop_access },
1919 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
1920 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
1921 .access = PL1_W, .type = ARM_CP_NOP },
1922 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
1923 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
1924 .access = PL0_W, .type = ARM_CP_NOP,
1925 .accessfn = aa64_cacheop_access },
1926 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
1927 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
1928 .access = PL0_W, .type = ARM_CP_NOP,
1929 .accessfn = aa64_cacheop_access },
1930 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
1931 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
1932 .access = PL1_W, .type = ARM_CP_NOP },
168aa23b
PM
1933 /* TLBI operations */
1934 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 1935 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
168aa23b
PM
1936 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1937 .writefn = tlbiall_write },
1938 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 1939 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
168aa23b
PM
1940 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1941 .writefn = tlbi_aa64_va_write },
1942 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 1943 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
168aa23b
PM
1944 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1945 .writefn = tlbi_aa64_asid_write },
1946 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 1947 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
168aa23b
PM
1948 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1949 .writefn = tlbi_aa64_vaa_write },
1950 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 1951 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
168aa23b
PM
1952 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1953 .writefn = tlbi_aa64_va_write },
1954 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
6ab9f499 1955 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
168aa23b
PM
1956 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1957 .writefn = tlbi_aa64_vaa_write },
1958 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
6ab9f499 1959 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
168aa23b
PM
1960 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1961 .writefn = tlbiall_write },
1962 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
6ab9f499 1963 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
168aa23b
PM
1964 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1965 .writefn = tlbi_aa64_va_write },
1966 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
6ab9f499 1967 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
168aa23b
PM
1968 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1969 .writefn = tlbi_aa64_asid_write },
1970 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
6ab9f499 1971 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
168aa23b
PM
1972 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1973 .writefn = tlbi_aa64_vaa_write },
1974 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
6ab9f499 1975 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
168aa23b
PM
1976 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1977 .writefn = tlbi_aa64_va_write },
1978 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
6ab9f499 1979 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
168aa23b
PM
1980 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1981 .writefn = tlbi_aa64_vaa_write },
19525524
PM
1982#ifndef CONFIG_USER_ONLY
1983 /* 64 bit address translation operations */
1984 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
1985 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
1986 .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
1987 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
1988 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
1989 .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
1990 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
1991 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
1992 .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
1993 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
1994 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
1995 .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
1996#endif
9449fdf6
PM
1997 /* 32 bit TLB invalidates, Inner Shareable */
1998 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1999 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
2000 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2001 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
2002 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2003 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
2004 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2005 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
2006 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
2007 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
2008 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
2009 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
2010 /* 32 bit ITLB invalidates */
2011 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2012 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
2013 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2014 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
2015 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2016 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
2017 /* 32 bit DTLB invalidates */
2018 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2019 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
2020 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2021 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
2022 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2023 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
2024 /* 32 bit TLB invalidates */
2025 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2026 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
2027 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2028 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
2029 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2030 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
2031 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2032 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
2033 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
2034 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
2035 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
2036 .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
2037 /* 32 bit cache operations */
2038 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
2039 .type = ARM_CP_NOP, .access = PL1_W },
2040 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
2041 .type = ARM_CP_NOP, .access = PL1_W },
2042 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
2043 .type = ARM_CP_NOP, .access = PL1_W },
2044 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
2045 .type = ARM_CP_NOP, .access = PL1_W },
2046 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
2047 .type = ARM_CP_NOP, .access = PL1_W },
2048 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
2049 .type = ARM_CP_NOP, .access = PL1_W },
2050 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
2051 .type = ARM_CP_NOP, .access = PL1_W },
2052 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
2053 .type = ARM_CP_NOP, .access = PL1_W },
2054 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
2055 .type = ARM_CP_NOP, .access = PL1_W },
2056 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
2057 .type = ARM_CP_NOP, .access = PL1_W },
2058 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
2059 .type = ARM_CP_NOP, .access = PL1_W },
2060 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
2061 .type = ARM_CP_NOP, .access = PL1_W },
2062 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
2063 .type = ARM_CP_NOP, .access = PL1_W },
2064 /* MMU Domain access control / MPU write buffer control */
2065 { .name = "DACR", .cp = 15,
2066 .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
2067 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
2068 .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
91e24069
PM
2069 /* Dummy implementation of monitor debug system control register:
2070 * we don't support debug.
2071 */
2072 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
2073 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
2074 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
cd5c11b8
PM
2075 /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
2076 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
2077 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
2078 .access = PL1_W, .type = ARM_CP_NOP },
a0618a19
PM
2079 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
2080 .type = ARM_CP_NO_MIGRATE,
2081 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
6947f059
EI
2082 .access = PL1_RW,
2083 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
a65f1de9
PM
2084 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
2085 .type = ARM_CP_NO_MIGRATE,
2086 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
2087 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[0]) },
f502cfc2
PM
2088 /* We rely on the access checks not allowing the guest to write to the
2089 * state field when SPSel indicates that it's being used as the stack
2090 * pointer.
2091 */
2092 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
2093 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
2094 .access = PL1_RW, .accessfn = sp_el0_access,
2095 .type = ARM_CP_NO_MIGRATE,
2096 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
2097 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
2098 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
2099 .type = ARM_CP_NO_MIGRATE,
2100 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
b0d2b7d0
PM
2101 REGINFO_SENTINEL
2102};
2103
c4241c7d
PM
2104static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2105 uint64_t value)
2771db27 2106{
00c8cb0a
AF
2107 ARMCPU *cpu = arm_env_get_cpu(env);
2108
2f0d8631
PM
2109 if (env->cp15.c1_sys == value) {
2110 /* Skip the TLB flush if nothing actually changed; Linux likes
2111 * to do a lot of pointless SCTLR writes.
2112 */
2113 return;
2114 }
2115
2771db27
PM
2116 env->cp15.c1_sys = value;
2117 /* ??? Lots of these bits are not implemented. */
2118 /* This may enable/disable the MMU, so do a TLB flush. */
00c8cb0a 2119 tlb_flush(CPU(cpu), 1);
2771db27
PM
2120}
2121
7da845b0
PM
2122static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
2123{
2124 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
2125 * but the AArch32 CTR has its own reginfo struct)
2126 */
2127 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
2128 return CP_ACCESS_TRAP;
2129 }
2130 return CP_ACCESS_OK;
2131}
2132
0b45451e
PM
2133static void define_aarch64_debug_regs(ARMCPU *cpu)
2134{
2135 /* Define breakpoint and watchpoint registers. These do nothing
2136 * but read as written, for now.
2137 */
2138 int i;
2139
2140 for (i = 0; i < 16; i++) {
2141 ARMCPRegInfo dbgregs[] = {
2142 { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
2143 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
2144 .access = PL1_RW,
2145 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
2146 { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
2147 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
2148 .access = PL1_RW,
2149 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
2150 { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
2151 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
2152 .access = PL1_RW,
2153 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
2154 { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
2155 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
2156 .access = PL1_RW,
2157 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
2158 REGINFO_SENTINEL
2159 };
2160 define_arm_cp_regs(cpu, dbgregs);
2161 }
2162}
2163
2ceb98c0
PM
2164void register_cp_regs_for_features(ARMCPU *cpu)
2165{
2166 /* Register all the coprocessor registers based on feature bits */
2167 CPUARMState *env = &cpu->env;
2168 if (arm_feature(env, ARM_FEATURE_M)) {
2169 /* M profile has no coprocessor registers */
2170 return;
2171 }
2172
e9aa6c21 2173 define_arm_cp_regs(cpu, cp_reginfo);
9449fdf6
PM
2174 if (!arm_feature(env, ARM_FEATURE_V8)) {
2175 /* Must go early as it is full of wildcards that may be
2176 * overridden by later definitions.
2177 */
2178 define_arm_cp_regs(cpu, not_v8_cp_reginfo);
2179 }
2180
7d57f408 2181 if (arm_feature(env, ARM_FEATURE_V6)) {
8515a092
PM
2182 /* The ID registers all have impdef reset values */
2183 ARMCPRegInfo v6_idregs[] = {
0ff644a7
PM
2184 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
2185 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
2186 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2187 .resetvalue = cpu->id_pfr0 },
0ff644a7
PM
2188 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
2189 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
2190 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2191 .resetvalue = cpu->id_pfr1 },
0ff644a7
PM
2192 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
2193 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
2194 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2195 .resetvalue = cpu->id_dfr0 },
0ff644a7
PM
2196 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
2197 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
2198 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2199 .resetvalue = cpu->id_afr0 },
0ff644a7
PM
2200 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
2201 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
2202 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2203 .resetvalue = cpu->id_mmfr0 },
0ff644a7
PM
2204 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
2205 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
2206 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2207 .resetvalue = cpu->id_mmfr1 },
0ff644a7
PM
2208 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
2209 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
2210 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2211 .resetvalue = cpu->id_mmfr2 },
0ff644a7
PM
2212 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
2213 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
2214 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2215 .resetvalue = cpu->id_mmfr3 },
0ff644a7
PM
2216 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
2217 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
2218 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2219 .resetvalue = cpu->id_isar0 },
0ff644a7
PM
2220 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
2221 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
2222 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2223 .resetvalue = cpu->id_isar1 },
0ff644a7
PM
2224 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
2225 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
2226 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2227 .resetvalue = cpu->id_isar2 },
0ff644a7
PM
2228 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
2229 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
2230 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2231 .resetvalue = cpu->id_isar3 },
0ff644a7
PM
2232 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
2233 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
2234 .access = PL1_R, .type = ARM_CP_CONST,
8515a092 2235 .resetvalue = cpu->id_isar4 },
0ff644a7
PM
2236 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
2237 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
2238 .access = PL1_R, .type = ARM_CP_CONST,
8515a092
PM
2239 .resetvalue = cpu->id_isar5 },
2240 /* 6..7 are as yet unallocated and must RAZ */
2241 { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
2242 .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
2243 .resetvalue = 0 },
2244 { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
2245 .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
2246 .resetvalue = 0 },
2247 REGINFO_SENTINEL
2248 };
2249 define_arm_cp_regs(cpu, v6_idregs);
7d57f408
PM
2250 define_arm_cp_regs(cpu, v6_cp_reginfo);
2251 } else {
2252 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
2253 }
4d31c596
PM
2254 if (arm_feature(env, ARM_FEATURE_V6K)) {
2255 define_arm_cp_regs(cpu, v6k_cp_reginfo);
2256 }
e9aa6c21 2257 if (arm_feature(env, ARM_FEATURE_V7)) {
200ac0ef 2258 /* v7 performance monitor control register: same implementor
7c2cb42b
AF
2259 * field as main ID register, and we implement only the cycle
2260 * count register.
200ac0ef 2261 */
7c2cb42b 2262#ifndef CONFIG_USER_ONLY
200ac0ef
PM
2263 ARMCPRegInfo pmcr = {
2264 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
2265 .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
d6d60581 2266 .type = ARM_CP_IO,
200ac0ef 2267 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
fcd25206
PM
2268 .accessfn = pmreg_access, .writefn = pmcr_write,
2269 .raw_writefn = raw_write,
200ac0ef 2270 };
7c2cb42b
AF
2271 define_one_arm_cp_reg(cpu, &pmcr);
2272#endif
776d4e5c 2273 ARMCPRegInfo clidr = {
7da845b0
PM
2274 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
2275 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
776d4e5c
PM
2276 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
2277 };
776d4e5c 2278 define_one_arm_cp_reg(cpu, &clidr);
e9aa6c21 2279 define_arm_cp_regs(cpu, v7_cp_reginfo);
7d57f408
PM
2280 } else {
2281 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
e9aa6c21 2282 }
b0d2b7d0 2283 if (arm_feature(env, ARM_FEATURE_V8)) {
e60cef86
PM
2284 /* AArch64 ID registers, which all have impdef reset values */
2285 ARMCPRegInfo v8_idregs[] = {
2286 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
2287 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
2288 .access = PL1_R, .type = ARM_CP_CONST,
2289 .resetvalue = cpu->id_aa64pfr0 },
2290 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
2291 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
2292 .access = PL1_R, .type = ARM_CP_CONST,
2293 .resetvalue = cpu->id_aa64pfr1},
2294 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
2295 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
2296 .access = PL1_R, .type = ARM_CP_CONST,
9225d739
PM
 2297 /* We mask out the PMUVer field, because we don't currently
2298 * implement the PMU. Not advertising it prevents the guest
2299 * from trying to use it and getting UNDEFs on registers we
2300 * don't implement.
2301 */
2302 .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
e60cef86
PM
2303 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
2304 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
2305 .access = PL1_R, .type = ARM_CP_CONST,
2306 .resetvalue = cpu->id_aa64dfr1 },
2307 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
2308 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
2309 .access = PL1_R, .type = ARM_CP_CONST,
2310 .resetvalue = cpu->id_aa64afr0 },
2311 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
2312 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
2313 .access = PL1_R, .type = ARM_CP_CONST,
2314 .resetvalue = cpu->id_aa64afr1 },
2315 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
2316 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
2317 .access = PL1_R, .type = ARM_CP_CONST,
2318 .resetvalue = cpu->id_aa64isar0 },
2319 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
2320 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
2321 .access = PL1_R, .type = ARM_CP_CONST,
2322 .resetvalue = cpu->id_aa64isar1 },
2323 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
2324 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
2325 .access = PL1_R, .type = ARM_CP_CONST,
2326 .resetvalue = cpu->id_aa64mmfr0 },
2327 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
2328 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
2329 .access = PL1_R, .type = ARM_CP_CONST,
2330 .resetvalue = cpu->id_aa64mmfr1 },
a50c0f51
PM
2331 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
2332 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
2333 .access = PL1_R, .type = ARM_CP_CONST,
2334 .resetvalue = cpu->mvfr0 },
2335 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
2336 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
2337 .access = PL1_R, .type = ARM_CP_CONST,
2338 .resetvalue = cpu->mvfr1 },
2339 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
2340 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
2341 .access = PL1_R, .type = ARM_CP_CONST,
2342 .resetvalue = cpu->mvfr2 },
e60cef86
PM
2343 REGINFO_SENTINEL
2344 };
3933443e
PM
2345 ARMCPRegInfo rvbar = {
2346 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
2347 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
2348 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
2349 };
2350 define_one_arm_cp_reg(cpu, &rvbar);
e60cef86 2351 define_arm_cp_regs(cpu, v8_idregs);
b0d2b7d0 2352 define_arm_cp_regs(cpu, v8_cp_reginfo);
0b45451e 2353 define_aarch64_debug_regs(cpu);
b0d2b7d0 2354 }
18032bec
PM
2355 if (arm_feature(env, ARM_FEATURE_MPU)) {
2356 /* These are the MPU registers prior to PMSAv6. Any new
2357 * PMSA core later than the ARM946 will require that we
2358 * implement the PMSAv6 or PMSAv7 registers, which are
2359 * completely different.
2360 */
2361 assert(!arm_feature(env, ARM_FEATURE_V6));
2362 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
2363 } else {
2364 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
2365 }
c326b979
PM
2366 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
2367 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
2368 }
6cc7a3ae
PM
2369 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
2370 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
2371 }
4a501606
PM
2372 if (arm_feature(env, ARM_FEATURE_VAPA)) {
2373 define_arm_cp_regs(cpu, vapa_cp_reginfo);
2374 }
c4804214
PM
2375 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
2376 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
2377 }
2378 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
2379 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
2380 }
2381 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
2382 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
2383 }
18032bec
PM
2384 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2385 define_arm_cp_regs(cpu, omap_cp_reginfo);
2386 }
34f90529
PM
2387 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
2388 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
2389 }
1047b9d7
PM
2390 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2391 define_arm_cp_regs(cpu, xscale_cp_reginfo);
2392 }
2393 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
2394 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
2395 }
7ac681cf
PM
2396 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2397 define_arm_cp_regs(cpu, lpae_cp_reginfo);
2398 }
7884849c
PM
2399 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
2400 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
2401 * be read-only (ie write causes UNDEF exception).
2402 */
2403 {
00a29f3d
PM
2404 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
2405 /* Pre-v8 MIDR space.
2406 * Note that the MIDR isn't a simple constant register because
7884849c
PM
2407 * of the TI925 behaviour where writes to another register can
2408 * cause the MIDR value to change.
97ce8d61
PC
2409 *
2410 * Unimplemented registers in the c15 0 0 0 space default to
2411 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
2412 * and friends override accordingly.
7884849c
PM
2413 */
2414 { .name = "MIDR",
97ce8d61 2415 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7884849c 2416 .access = PL1_R, .resetvalue = cpu->midr,
d4e6df63 2417 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
97ce8d61
PC
2418 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
2419 .type = ARM_CP_OVERRIDE },
7884849c
PM
2420 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
2421 { .name = "DUMMY",
2422 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
2423 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2424 { .name = "DUMMY",
2425 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
2426 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2427 { .name = "DUMMY",
2428 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
2429 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2430 { .name = "DUMMY",
2431 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
2432 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2433 { .name = "DUMMY",
2434 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
2435 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2436 REGINFO_SENTINEL
2437 };
00a29f3d
PM
2438 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
2439 /* v8 MIDR -- the wildcard isn't necessary, and nor is the
2440 * variable-MIDR TI925 behaviour. Instead we have a single
2441 * (strictly speaking IMPDEF) alias of the MIDR, REVIDR.
2442 */
2443 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
2444 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
2445 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
2446 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
2447 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
2448 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
2449 REGINFO_SENTINEL
2450 };
2451 ARMCPRegInfo id_cp_reginfo[] = {
2452 /* These are common to v8 and pre-v8 */
2453 { .name = "CTR",
2454 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
2455 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
2456 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
2457 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
2458 .access = PL0_R, .accessfn = ctr_el0_access,
2459 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
2460 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
2461 { .name = "TCMTR",
2462 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
2463 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2464 { .name = "TLBTR",
2465 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
2466 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2467 REGINFO_SENTINEL
2468 };
7884849c
PM
2469 ARMCPRegInfo crn0_wi_reginfo = {
2470 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
2471 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
2472 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
2473 };
2474 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
2475 arm_feature(env, ARM_FEATURE_STRONGARM)) {
2476 ARMCPRegInfo *r;
2477 /* Register the blanket "writes ignored" value first to cover the
a703eda1
PC
2478 * whole space. Then update the specific ID registers to allow write
2479 * access, so that they ignore writes rather than causing them to
2480 * UNDEF.
7884849c
PM
2481 */
2482 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
00a29f3d
PM
2483 for (r = id_pre_v8_midr_cp_reginfo;
2484 r->type != ARM_CP_SENTINEL; r++) {
2485 r->access = PL1_RW;
2486 }
7884849c
PM
2487 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
2488 r->access = PL1_RW;
7884849c 2489 }
7884849c 2490 }
00a29f3d
PM
2491 if (arm_feature(env, ARM_FEATURE_V8)) {
2492 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
2493 } else {
2494 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
2495 }
a703eda1 2496 define_arm_cp_regs(cpu, id_cp_reginfo);
7884849c
PM
2497 }
2498
97ce8d61
PC
2499 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
2500 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
2501 }
2502
2771db27
PM
2503 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
2504 ARMCPRegInfo auxcr = {
2eef0bf8
PM
2505 .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
2506 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
2771db27
PM
2507 .access = PL1_RW, .type = ARM_CP_CONST,
2508 .resetvalue = cpu->reset_auxcr
2509 };
2510 define_one_arm_cp_reg(cpu, &auxcr);
2511 }
2512
d8ba780b 2513 if (arm_feature(env, ARM_FEATURE_CBAR)) {
f318cec6
PM
2514 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
2515 /* 32 bit view is [31:18] 0...0 [43:32]. */
2516 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
2517 | extract64(cpu->reset_cbar, 32, 12);
2518 ARMCPRegInfo cbar_reginfo[] = {
2519 { .name = "CBAR",
2520 .type = ARM_CP_CONST,
2521 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
2522 .access = PL1_R, .resetvalue = cpu->reset_cbar },
2523 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
2524 .type = ARM_CP_CONST,
2525 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
2526 .access = PL1_R, .resetvalue = cbar32 },
2527 REGINFO_SENTINEL
2528 };
2529 /* We don't implement a r/w 64 bit CBAR currently */
2530 assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
2531 define_arm_cp_regs(cpu, cbar_reginfo);
2532 } else {
2533 ARMCPRegInfo cbar = {
2534 .name = "CBAR",
2535 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
2536 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
2537 .fieldoffset = offsetof(CPUARMState,
2538 cp15.c15_config_base_address)
2539 };
2540 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
2541 cbar.access = PL1_R;
2542 cbar.fieldoffset = 0;
2543 cbar.type = ARM_CP_CONST;
2544 }
2545 define_one_arm_cp_reg(cpu, &cbar);
2546 }
d8ba780b
PC
2547 }
2548
2771db27
PM
2549 /* Generic registers whose values depend on the implementation */
2550 {
2551 ARMCPRegInfo sctlr = {
5ebafdf3
PM
2552 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
2553 .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
2771db27 2554 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
d4e6df63
PM
2555 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
2556 .raw_writefn = raw_write,
2771db27
PM
2557 };
2558 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2559 /* Normally we would always end the TB on an SCTLR write, but Linux
2560 * arch/arm/mach-pxa/sleep.S expects two instructions following
2561 * an MMU enable to execute from cache. Imitate this behaviour.
2562 */
2563 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
2564 }
2565 define_one_arm_cp_reg(cpu, &sctlr);
2566 }
2ceb98c0
PM
2567}
2568
778c3a06 2569ARMCPU *cpu_arm_init(const char *cpu_model)
40f137e1 2570{
9262685b 2571 return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
14969266
AF
2572}
2573
2574void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
2575{
22169d41 2576 CPUState *cs = CPU(cpu);
14969266
AF
2577 CPUARMState *env = &cpu->env;
2578
6a669427
PM
2579 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
2580 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
2581 aarch64_fpu_gdb_set_reg,
2582 34, "aarch64-fpu.xml", 0);
2583 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
22169d41 2584 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
56aebc89
PB
2585 51, "arm-neon.xml", 0);
2586 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
22169d41 2587 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
56aebc89
PB
2588 35, "arm-vfp3.xml", 0);
2589 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
22169d41 2590 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
56aebc89
PB
2591 19, "arm-vfp.xml", 0);
2592 }
40f137e1
PB
2593}
2594
777dc784
PM
2595/* Sort alphabetically by type name, except for "any". */
2596static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5adb4839 2597{
777dc784
PM
2598 ObjectClass *class_a = (ObjectClass *)a;
2599 ObjectClass *class_b = (ObjectClass *)b;
2600 const char *name_a, *name_b;
5adb4839 2601
777dc784
PM
2602 name_a = object_class_get_name(class_a);
2603 name_b = object_class_get_name(class_b);
51492fd1 2604 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
777dc784 2605 return 1;
51492fd1 2606 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
777dc784
PM
2607 return -1;
2608 } else {
2609 return strcmp(name_a, name_b);
5adb4839
PB
2610 }
2611}
2612
777dc784 2613static void arm_cpu_list_entry(gpointer data, gpointer user_data)
40f137e1 2614{
777dc784 2615 ObjectClass *oc = data;
92a31361 2616 CPUListState *s = user_data;
51492fd1
AF
2617 const char *typename;
2618 char *name;
3371d272 2619
51492fd1
AF
2620 typename = object_class_get_name(oc);
2621 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
777dc784 2622 (*s->cpu_fprintf)(s->file, " %s\n",
51492fd1
AF
2623 name);
2624 g_free(name);
777dc784
PM
2625}
2626
2627void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2628{
92a31361 2629 CPUListState s = {
777dc784
PM
2630 .file = f,
2631 .cpu_fprintf = cpu_fprintf,
2632 };
2633 GSList *list;
2634
2635 list = object_class_get_list(TYPE_ARM_CPU, false);
2636 list = g_slist_sort(list, arm_cpu_list_compare);
2637 (*cpu_fprintf)(f, "Available CPUs:\n");
2638 g_slist_foreach(list, arm_cpu_list_entry, &s);
2639 g_slist_free(list);
a96c0514
PM
2640#ifdef CONFIG_KVM
2641 /* The 'host' CPU type is dynamically registered only if KVM is
2642 * enabled, so we have to special-case it here:
2643 */
2644 (*cpu_fprintf)(f, " host (only available in KVM mode)\n");
2645#endif
40f137e1
PB
2646}
2647
78027bb6
CR
2648static void arm_cpu_add_definition(gpointer data, gpointer user_data)
2649{
2650 ObjectClass *oc = data;
2651 CpuDefinitionInfoList **cpu_list = user_data;
2652 CpuDefinitionInfoList *entry;
2653 CpuDefinitionInfo *info;
2654 const char *typename;
2655
2656 typename = object_class_get_name(oc);
2657 info = g_malloc0(sizeof(*info));
2658 info->name = g_strndup(typename,
2659 strlen(typename) - strlen("-" TYPE_ARM_CPU));
2660
2661 entry = g_malloc0(sizeof(*entry));
2662 entry->value = info;
2663 entry->next = *cpu_list;
2664 *cpu_list = entry;
2665}
2666
2667CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2668{
2669 CpuDefinitionInfoList *cpu_list = NULL;
2670 GSList *list;
2671
2672 list = object_class_get_list(TYPE_ARM_CPU, false);
2673 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
2674 g_slist_free(list);
2675
2676 return cpu_list;
2677}
2678
6e6efd61 2679static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
f5a0a5a5
PM
2680 void *opaque, int state,
2681 int crm, int opc1, int opc2)
6e6efd61
PM
2682{
2683 /* Private utility function for define_one_arm_cp_reg_with_opaque():
2684 * add a single reginfo struct to the hash table.
2685 */
2686 uint32_t *key = g_new(uint32_t, 1);
2687 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
2688 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
f5a0a5a5
PM
2689 if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
2690 /* The AArch32 view of a shared register sees the lower 32 bits
2691 * of a 64 bit backing field. It is not migratable as the AArch64
2692 * view handles that. AArch64 also handles reset.
2693 * We assume it is a cp15 register.
2694 */
2695 r2->cp = 15;
2696 r2->type |= ARM_CP_NO_MIGRATE;
2697 r2->resetfn = arm_cp_reset_ignore;
2698#ifdef HOST_WORDS_BIGENDIAN
2699 if (r2->fieldoffset) {
2700 r2->fieldoffset += sizeof(uint32_t);
2701 }
2702#endif
2703 }
2704 if (state == ARM_CP_STATE_AA64) {
2705 /* To allow abbreviation of ARMCPRegInfo
2706 * definitions, we treat cp == 0 as equivalent to
2707 * the value for "standard guest-visible sysreg".
2708 */
2709 if (r->cp == 0) {
2710 r2->cp = CP_REG_ARM64_SYSREG_CP;
2711 }
2712 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
2713 r2->opc0, opc1, opc2);
2714 } else {
2715 *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
2716 }
6e6efd61
PM
2717 if (opaque) {
2718 r2->opaque = opaque;
2719 }
67ed771d
PM
2720 /* reginfo passed to helpers is correct for the actual access,
2721 * and is never ARM_CP_STATE_BOTH:
2722 */
2723 r2->state = state;
6e6efd61
PM
2724 /* Make sure reginfo passed to helpers for wildcarded regs
2725 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
2726 */
2727 r2->crm = crm;
2728 r2->opc1 = opc1;
2729 r2->opc2 = opc2;
2730 /* By convention, for wildcarded registers only the first
2731 * entry is used for migration; the others are marked as
2732 * NO_MIGRATE so we don't try to transfer the register
2733 * multiple times. Special registers (ie NOP/WFI) are
2734 * never migratable.
2735 */
2736 if ((r->type & ARM_CP_SPECIAL) ||
2737 ((r->crm == CP_ANY) && crm != 0) ||
2738 ((r->opc1 == CP_ANY) && opc1 != 0) ||
2739 ((r->opc2 == CP_ANY) && opc2 != 0)) {
2740 r2->type |= ARM_CP_NO_MIGRATE;
2741 }
2742
2743 /* Overriding of an existing definition must be explicitly
2744 * requested.
2745 */
2746 if (!(r->type & ARM_CP_OVERRIDE)) {
2747 ARMCPRegInfo *oldreg;
2748 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
2749 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
2750 fprintf(stderr, "Register redefined: cp=%d %d bit "
2751 "crn=%d crm=%d opc1=%d opc2=%d, "
2752 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
2753 r2->crn, r2->crm, r2->opc1, r2->opc2,
2754 oldreg->name, r2->name);
2755 g_assert_not_reached();
2756 }
2757 }
2758 g_hash_table_insert(cpu->cp_regs, key, r2);
2759}
2760
2761
4b6a83fb
PM
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    /* Expand CP_ANY wildcards into inclusive min..max iteration ranges. */
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    /* Register one hashtable entry for every concrete
     * (crm, opc1, opc2, state) combination this definition covers.
     */
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    add_cpreg_to_hashtable(cpu, r, opaque, state,
                                           crm, opc1, opc2);
                }
            }
        }
    }
}
2871
2872void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
2873 const ARMCPRegInfo *regs, void *opaque)
2874{
2875 /* Define a whole list of registers */
2876 const ARMCPRegInfo *r;
2877 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
2878 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
2879 }
2880}
2881
60322b39 2882const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
4b6a83fb 2883{
60322b39 2884 return g_hash_table_lookup(cpregs, &encoded_cp);
4b6a83fb
PM
2885}
2886
c4241c7d
PM
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers:
     * the written value is discarded and no state is changed.
     */
}
2892
c4241c7d 2893uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
4b6a83fb
PM
2894{
2895 /* Helper coprocessor write function for read-as-zero registers */
4b6a83fb
PM
2896 return 0;
2897}
2898
f5a0a5a5
PM
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
2903
0ecb72a5 2904static int bad_mode_switch(CPUARMState *env, int mode)
37064a8b
PM
2905{
2906 /* Return true if it is not valid for us to switch to
2907 * this CPU mode (ie all the UNPREDICTABLE cases in
2908 * the ARM ARM CPSRWriteByInstr pseudocode).
2909 */
2910 switch (mode) {
2911 case ARM_CPU_MODE_USR:
2912 case ARM_CPU_MODE_SYS:
2913 case ARM_CPU_MODE_SVC:
2914 case ARM_CPU_MODE_ABT:
2915 case ARM_CPU_MODE_UND:
2916 case ARM_CPU_MODE_IRQ:
2917 case ARM_CPU_MODE_FIQ:
2918 return 0;
2919 default:
2920 return 1;
2921 }
2922}
2923
2f4a40e5
AZ
uint32_t cpsr_read(CPUARMState *env)
{
    /* Reassemble the guest CPSR from the separately cached flag fields
     * (NF/ZF/CF/VF, QF, Thumb bit, IT bits, GE bits, AIF bits) plus the
     * remaining bits kept in uncached_cpsr.
     */
    int ZF;
    ZF = (env->ZF == 0);  /* Z flag is set when the cached ZF field is zero */
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
2934
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    /* Update the CPSR from 'val', touching only the bits selected by
     * 'mask'.  The NZCV flags, Q flag, Thumb bit, IT bits, GE bits and
     * AIF bits are held in separate cached fields and are unpacked here;
     * everything else lives in uncached_cpsr.
     */
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;          /* ZF caches "Z is clear" */
        env->NF = val;                      /* N flag is bit 31 of NF */
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;  /* V flag cached in bit 31 of VF */
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* Update the cached interrupt-mask (AIF) bits. */
    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    /* Handle a requested mode change (bank switch) last. */
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
2976
b26eefb6
PB
2977/* Sign/zero extend */
2978uint32_t HELPER(sxtb16)(uint32_t x)
2979{
2980 uint32_t res;
2981 res = (uint16_t)(int8_t)x;
2982 res |= (uint32_t)(int8_t)(x >> 16) << 16;
2983 return res;
2984}
2985
2986uint32_t HELPER(uxtb16)(uint32_t x)
2987{
2988 uint32_t res;
2989 res = (uint16_t)(uint8_t)x;
2990 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
2991 return res;
2992}
2993
f51bbbfe
PB
uint32_t HELPER(clz)(uint32_t x)
{
    /* Count leading zero bits; delegates to clz32() from
     * "qemu/host-utils.h" (included at the top of this file).
     */
    return clz32(x);
}
2998
3670669c
PB
2999int32_t HELPER(sdiv)(int32_t num, int32_t den)
3000{
3001 if (den == 0)
3002 return 0;
686eeb93
AJ
3003 if (num == INT_MIN && den == -1)
3004 return INT_MIN;
3670669c
PB
3005 return num / den;
3006}
3007
3008uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
3009{
3010 if (den == 0)
3011 return 0;
3012 return num / den;
3013}
3014
3015uint32_t HELPER(rbit)(uint32_t x)
3016{
3017 x = ((x & 0xff000000) >> 24)
3018 | ((x & 0x00ff0000) >> 8)
3019 | ((x & 0x0000ff00) << 8)
3020 | ((x & 0x000000ff) << 24);
3021 x = ((x & 0xf0f0f0f0) >> 4)
3022 | ((x & 0x0f0f0f0f) << 4);
3023 x = ((x & 0x88888888) >> 3)
3024 | ((x & 0x44444444) >> 1)
3025 | ((x & 0x22222222) << 1)
3026 | ((x & 0x11111111) << 3);
3027 return x;
3028}
3029
5fafdf24 3030#if defined(CONFIG_USER_ONLY)
b5ff1b31 3031
97a8ea5a 3032void arm_cpu_do_interrupt(CPUState *cs)
b5ff1b31 3033{
27103424 3034 cs->exception_index = -1;
b5ff1b31
FB
3035}
3036
7510454e
AF
3037int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
3038 int mmu_idx)
b5ff1b31 3039{
7510454e
AF
3040 ARMCPU *cpu = ARM_CPU(cs);
3041 CPUARMState *env = &cpu->env;
3042
abf1172f 3043 env->exception.vaddress = address;
b5ff1b31 3044 if (rw == 2) {
27103424 3045 cs->exception_index = EXCP_PREFETCH_ABORT;
b5ff1b31 3046 } else {
27103424 3047 cs->exception_index = EXCP_DATA_ABORT;
b5ff1b31
FB
3048 }
3049 return 1;
3050}
3051
9ee6e8bb 3052/* These should probably raise undefined insn exceptions. */
0ecb72a5 3053void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
9ee6e8bb 3054{
a47dddd7
AF
3055 ARMCPU *cpu = arm_env_get_cpu(env);
3056
3057 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
9ee6e8bb
PB
3058}
3059
0ecb72a5 3060uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
9ee6e8bb 3061{
a47dddd7
AF
3062 ARMCPU *cpu = arm_env_get_cpu(env);
3063
3064 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
9ee6e8bb
PB
3065 return 0;
3066}
3067
0ecb72a5 3068void switch_mode(CPUARMState *env, int mode)
b5ff1b31 3069{
a47dddd7
AF
3070 ARMCPU *cpu = arm_env_get_cpu(env);
3071
3072 if (mode != ARM_CPU_MODE_USR) {
3073 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
3074 }
b5ff1b31
FB
3075}
3076
0ecb72a5 3077void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
9ee6e8bb 3078{
a47dddd7
AF
3079 ARMCPU *cpu = arm_env_get_cpu(env);
3080
3081 cpu_abort(CPU(cpu), "banked r13 write\n");
9ee6e8bb
PB
3082}
3083
0ecb72a5 3084uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
9ee6e8bb 3085{
a47dddd7
AF
3086 ARMCPU *cpu = arm_env_get_cpu(env);
3087
3088 cpu_abort(CPU(cpu), "banked r13 read\n");
9ee6e8bb
PB
3089 return 0;
3090}
3091
b5ff1b31
FB
3092#else
3093
3094/* Map CPU modes onto saved register banks. */
494b00c7 3095int bank_number(int mode)
b5ff1b31
FB
3096{
3097 switch (mode) {
3098 case ARM_CPU_MODE_USR:
3099 case ARM_CPU_MODE_SYS:
3100 return 0;
3101 case ARM_CPU_MODE_SVC:
3102 return 1;
3103 case ARM_CPU_MODE_ABT:
3104 return 2;
3105 case ARM_CPU_MODE_UND:
3106 return 3;
3107 case ARM_CPU_MODE_IRQ:
3108 return 4;
3109 case ARM_CPU_MODE_FIQ:
3110 return 5;
28c9457d
EI
3111 case ARM_CPU_MODE_HYP:
3112 return 6;
3113 case ARM_CPU_MODE_MON:
3114 return 7;
b5ff1b31 3115 }
f5206413 3116 hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
b5ff1b31
FB
3117}
3118
0ecb72a5 3119void switch_mode(CPUARMState *env, int mode)
b5ff1b31
FB
3120{
3121 int old_mode;
3122 int i;
3123
3124 old_mode = env->uncached_cpsr & CPSR_M;
3125 if (mode == old_mode)
3126 return;
3127
3128 if (old_mode == ARM_CPU_MODE_FIQ) {
3129 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 3130 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
3131 } else if (mode == ARM_CPU_MODE_FIQ) {
3132 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 3133 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
3134 }
3135
f5206413 3136 i = bank_number(old_mode);
b5ff1b31
FB
3137 env->banked_r13[i] = env->regs[13];
3138 env->banked_r14[i] = env->regs[14];
3139 env->banked_spsr[i] = env->spsr;
3140
f5206413 3141 i = bank_number(mode);
b5ff1b31
FB
3142 env->regs[13] = env->banked_r13[i];
3143 env->regs[14] = env->banked_r14[i];
3144 env->spsr = env->banked_spsr[i];
3145}
3146
9ee6e8bb
PB
3147static void v7m_push(CPUARMState *env, uint32_t val)
3148{
70d74660
AF
3149 CPUState *cs = CPU(arm_env_get_cpu(env));
3150
9ee6e8bb 3151 env->regs[13] -= 4;
ab1da857 3152 stl_phys(cs->as, env->regs[13], val);
9ee6e8bb
PB
3153}
3154
3155static uint32_t v7m_pop(CPUARMState *env)
3156{
70d74660 3157 CPUState *cs = CPU(arm_env_get_cpu(env));
9ee6e8bb 3158 uint32_t val;
70d74660 3159
fdfba1a2 3160 val = ldl_phys(cs->as, env->regs[13]);
9ee6e8bb
PB
3161 env->regs[13] += 4;
3162 return val;
3163}
3164
3165/* Switch to V7M main or process stack pointer. */
3166static void switch_v7m_sp(CPUARMState *env, int process)
3167{
3168 uint32_t tmp;
3169 if (env->v7m.current_sp != process) {
3170 tmp = env->v7m.other_sp;
3171 env->v7m.other_sp = env->regs[13];
3172 env->regs[13] = tmp;
3173 env->v7m.current_sp = process;
3174 }
3175}
3176
static void do_v7m_exception_exit(CPUARMState *env)
{
    /* Perform a v7M exception return: complete the NVIC handling,
     * select the target stack from the EXC_RETURN value (held in r15
     * at this point), and unstack the exception frame in order.
     */
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];  /* EXC_RETURN magic value */
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack. */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers. */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment. */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode. However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch. */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer. */
}
3207
/* v7M exception entry.  Synchronous exceptions are simply marked pending
 * on the NVIC; a taken IRQ performs the architectural stacking of
 * r0-r3/r12/lr/pc/xPSR, switches to the main stack and vectors via the
 * table at v7m.vecbase.  BKPT may be intercepted as a semihosting call.
 */
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(cs->exception_index);

    /* Build the EXC_RETURN magic value for r14: bit 2 = process stack
     * in use, bit 3 = came from Thread mode (no active exception).
     */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* TODO: if we implemented the MPU registers, this is where we
         * should set the MMFAR, etc from exception.fsr and exception.vaddress.
         */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            /* Immediate 0xab marks a semihosting request (Thumb BKPT). */
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        /* Only IRQs fall through to the stacking code below. */
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer to 8 bytes, recording the adjustment in xPSR
     * bit 9 so exception return can undo it.
     */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    /* Fetch the handler address from the vector table; bit 0 of the
     * entry selects the Thumb state (always set on real v7M code).
     */
    addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
3291
/* Handle a CPU exception (A/R-profile, not M-profile): select the new
 * CPU mode, vector offset and interrupt mask for the exception, record
 * fault status/address for aborts, then switch mode, bank registers and
 * jump to the (possibly remapped or high) vector.  SWI/BKPT may instead
 * be intercepted as semihosting calls.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;       /* vector offset, then absolute vector address */
    uint32_t mask;       /* CPSR interrupt bits to set on entry */
    int new_mode;        /* target CPU mode for this exception */
    uint32_t offset;     /* amount to add to PC to form the return LR */

    /* M profile has its own entry path (arm_v7m_cpu_do_interrupt). */
    assert(!IS_M(env));

    arm_log_exception(cs->exception_index);

    /* TODO: Vectored interrupt controller.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        /* Debug-event fault status for the prefetch abort path below. */
        env->exception.fsr = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifsr_el2 = env->exception.fsr;
        env->cp15.far_el1 = deposit64(env->cp15.far_el1, 32, 32,
                                      env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.esr_el[1] = env->exception.fsr;
        env->cp15.far_el1 = deposit64(env->cp15.far_el1, 0, 32,
                                      env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      (uint32_t)env->cp15.esr_el[1],
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & SCTLR_V) {
        /* when enabled, base address cannot be remapped.  */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and un-secure copy. Since the cpu is always in a un-secure operation
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.vbar_el[1];
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->daif |= mask;
    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
3428
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.
   @ap: 3-bit access-permission field from the descriptor
   @domain_prot: 2-bit domain access value (3 = manager, no checks)
   @access_type: 0 read, 1 write, 2 execute
   @is_user: nonzero for an unprivileged access */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    /* Domain "manager" access bypasses permission checks entirely. */
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    /* For a write, "read-only" permission means no access at all. */
    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        /* AP = 0b000 is always a fault on v7. */
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        if (access_type == 1)
            return 0;
        /* Pre-v7: behaviour depends on the SCTLR S and R bits. */
        switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        /* AP = 0b111 is only defined from V6K onwards. */
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}
3484
0ecb72a5 3485static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
b2fa1797
PB
3486{
3487 uint32_t table;
3488
3489 if (address & env->cp15.c2_mask)
327ed10f 3490 table = env->cp15.ttbr1_el1 & 0xffffc000;
b2fa1797 3491 else
327ed10f 3492 table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;
b2fa1797
PB
3493
3494 table |= (address >> 18) & 0x3ffc;
3495 return table;
3496}
3497
/* Translation table walk for the pre-VMSAv6 short-descriptor format
 * (coarse/fine page tables; 1MB sections, 64K/4K/1K pages).
 * Returns 0 on success, filling in *phys_ptr/*prot/*page_size; otherwise
 * returns a short-format FSR code with the domain in bits [7:4].
 */
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    /* DACR holds two bits of access control per domain. */
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(cs->as, table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            /* Pick the per-subpage AP field for this address. */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    /* XScale treats this encoding in a coarse table as
                     * an extended small (4K) page.
                     */
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    /* This format has no execute-never bit. */
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
3593
/* Translation table walk for the VMSAv6/v7 short-descriptor format
 * (supersections, sections, 64K/4K pages, XN/PXN and the AFE
 * simplified-permissions model).
 * Returns 0 on success, filling in *phys_ptr/*prot/*page_size; otherwise
 * returns a short-format FSR code with the domain in bits [7:4].
 */
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section (supersections have no domain field).  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault.  */
        } else {
            code = 11; /* Page domain fault.  */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(cs->as, table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        /* Domain "manager": all accesses permitted, no permission checks. */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            /* Privileged-execute-never: treat as XN for privileged code. */
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
3706
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1, /* no valid translation for the address */
    access_fault = 2,      /* descriptor valid but access flag clear */
    permission_fault = 3,  /* mapping present but forbids this access */
} MMUFaultType;
3715
/* Translation table walk for the LPAE / AArch64 long-descriptor format.
 * Selects TTBR0/TTBR1 from the T0SZ/T1SZ address-range split, walks up
 * to four levels of 64-bit descriptors accumulating hierarchical
 * attributes, and checks the AF/AP/XN/PXN attribute bits.
 * Returns 0 on success, else a long-format DFSR/IFSR value (LPAE bit
 * set, fault type in bits [5:2], level in bits [1:0]).
 */
static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    int32_t tsz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, descmask;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t granule_sz = 9;  /* log2(descriptors per table): 4K default */
    int32_t va_size = 32;
    int32_t tbi = 0;         /* top-byte-ignore: 8 when enabled */

    if (arm_el_is_aa64(env, 1)) {
        va_size = 64;
        /* TCR.TBI0/TBI1 select top-byte-ignore per address half. */
        if (extract64(address, 55, 1))
            tbi = extract64(env->cp15.c2_control, 38, 1);
        else
            tbi = extract64(env->cp15.c2_control, 37, 1);
        tbi *= 8;
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 6);
    if (arm_el_is_aa64(env, 1)) {
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    }
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 6);
    if (arm_el_is_aa64(env, 1)) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = env->cp15.ttbr0_el1;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;

        tg = extract32(env->cp15.c2_control, 14, 2);
        if (tg == 1) { /* 64KB pages */
            granule_sz = 13;
        }
        if (tg == 2) { /* 16KB pages */
            granule_sz = 11;
        }
    } else {
        ttbr = env->cp15.ttbr1_el1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;

        /* Note the TG1 encoding differs from TG0. */
        tg = extract32(env->cp15.c2_control, 30, 2);
        if (tg == 3) { /* 64KB pages */
            granule_sz = 13;
        }
        if (tg == 1) { /* 16KB pages */
            granule_sz = 11;
        }
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* The starting level depends on the virtual address size which can be
     * up to 48-bits and the translation granule size.
     */
    if ((va_size - tsz) > (granule_sz * 4 + 3)) {
        level = 0;
    } else if ((va_size - tsz) > (granule_sz * 3 + 3)) {
        level = 1;
    } else {
        level = 2;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    if (tsz) {
        address &= (1ULL << (va_size - tsz)) - 1;
    }

    descmask = (1ULL << (granule_sz + 3)) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
        descaddr &= ~7ULL;
        descriptor = ldq_phys(cs->as, descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << ((granule_sz * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        if (arm_feature(env, ARM_FEATURE_V8)) {
            attrs = extract64(descriptor, 2, 10)
                | (extract64(descriptor, 53, 11) << 10);
        } else {
            attrs = extract64(descriptor, 2, 10)
                | (extract64(descriptor, 52, 12) << 10);
        }
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
3929
/* PMSAv5 MPU lookup: match ADDRESS against the eight region registers
 * (highest-numbered region wins), then derive protection from the
 * insn/data access-permission registers.  Translation is identity.
 * Returns 0 on success, or a small abbreviated fault code (not a full
 * FSR value) on no-match/permission failure.
 */
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        /* Bit 0 is the region enable. */
        if ((base & 1) == 0)
            continue;
        /* Bits [5:1] encode log2(region size) - 1. */
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    /* Four AP bits per region. */
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
3990
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, target_ulong address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & SCTLR_M) == 0) {
        /* MMU/MPU disabled: identity map with full access.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        /* LPAE / long-descriptor format walk. */
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & SCTLR_XP) {
        /* VMSAv6+ short-descriptor format. */
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        /* Legacy (subpage AP) short-descriptor format. */
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}
4044
/* Handle a TLB miss: translate ADDRESS and either install the mapping
 * in QEMU's TLB (returning 0), or record the syndrome/FSR/FAR state for
 * an abort and return 1 so the caller can raise the exception.
 */
int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                             int access_type, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;
    uint32_t syn;
    /* Abort is taken to EL1, so "same EL" iff we are not at EL0. */
    bool same_el = (arm_current_pl(env) != 0);

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(target_ulong)0x3ff;
        tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    /* AArch64 syndrome does not have an LPAE bit */
    syn = ret & ~(1 << 9);

    /* For insn and data aborts we assume there is no instruction syndrome
     * information; this is always true for exceptions reported to EL1.
     */
    if (access_type == 2) {
        syn = syn_insn_abort(same_el, 0, 0, syn);
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
            /* Set the WnR bit in the FSR for a write access (the
             * translation code leaves it to us -- see get_phys_addr).
             */
            ret |= (1 << 11);
        }
        cs->exception_index = EXCP_DATA_ABORT;
    }

    env->exception.syndrome = syn;
    env->exception.vaddress = address;
    env->exception.fsr = ret;
    return 1;
}
4090
00b941e5 4091hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
b5ff1b31 4092{
00b941e5 4093 ARMCPU *cpu = ARM_CPU(cs);
a8170e5e 4094 hwaddr phys_addr;
d4c430a8 4095 target_ulong page_size;
b5ff1b31
FB
4096 int prot;
4097 int ret;
4098
00b941e5 4099 ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);
b5ff1b31 4100
00b941e5 4101 if (ret != 0) {
b5ff1b31 4102 return -1;
00b941e5 4103 }
b5ff1b31
FB
4104
4105 return phys_addr;
4106}
4107
0ecb72a5 4108void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
9ee6e8bb 4109{
39ea3d4e
PM
4110 if ((env->uncached_cpsr & CPSR_M) == mode) {
4111 env->regs[13] = val;
4112 } else {
f5206413 4113 env->banked_r13[bank_number(mode)] = val;
39ea3d4e 4114 }
9ee6e8bb
PB
4115}
4116
0ecb72a5 4117uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
9ee6e8bb 4118{
39ea3d4e
PM
4119 if ((env->uncached_cpsr & CPSR_M) == mode) {
4120 return env->regs[13];
4121 } else {
f5206413 4122 return env->banked_r13[bank_number(mode)];
39ea3d4e 4123 }
9ee6e8bb
PB
4124}
4125
/* v7M MRS: read a special register; REG is the SYSm encoding. */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    /* v7m.current_sp selects which SP is live in regs[13]; the other
     * one is parked in v7m.other_sp.
     */
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
4164
/* Write a v7M special register for the MSR instruction.
 * @reg uses the same encoding as HELPER(v7m_mrs); read-only fields
 * (e.g. IPSR) are silently ignored, and unimplemented register
 * numbers abort the CPU (debug aid only).
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    /* MSP/PSP: write the live SP or the stashed one depending on
     * which stack pointer is currently selected (current_sp).
     */
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    /* PRIMASK/FAULTMASK map onto the PSTATE I and F bits. */
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        /* Only lower (i.e. raise-priority) a nonzero BASEPRI. */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        /* Bit 1 selects the stack pointer; switch immediately. */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
4235
b5ff1b31 4236#endif
6ddbc6e4 4237
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize; /* block size in bytes */
    uint64_t vaddr = vaddr_in & ~(blocklen - 1); /* align down to block */

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;

        /* Two passes: the first may find pages missing from the TLB;
         * after the byte stores below have populated the TLB entries
         * we retry the fast path once.
         */
        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, cpu_mmu_index(env));
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         * + we'd need to account for the blocksize being larger than a page
         * + the direct-RAM access case is almost always going to be dealt
         *   with in the fastpath code above, so there's no speed benefit
         * + we would have to deal with the map returning NULL because the
         *   bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
        }
    }
#else
    /* User-mode emulation: guest memory is directly host-addressable. */
    memset(g2h(vaddr), 0, blocklen);
#endif
}
4319
6ddbc6e4
PB
4320/* Note that signed overflow is undefined in C. The following routines are
4321 careful to use unsigned types where modulo arithmetic is required.
4322 Failure to do so _will_ break on newer gcc. */
4323
4324/* Signed saturating arithmetic. */
4325
/* 16-bit signed saturating addition: clamps to [0x8000, 0x7fff]. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Signed overflow: operands share a sign bit that the result lacks. */
    if (!((a ^ b) & 0x8000) && ((sum ^ a) & 0x8000)) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
4340
/* 8-bit signed saturating addition: clamps to [0x80, 0x7f]. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Signed overflow: operands share a sign bit that the result lacks. */
    if (!((a ^ b) & 0x80) && ((sum ^ a) & 0x80)) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
4355
/* 16-bit signed saturating subtraction: clamps to [0x8000, 0x7fff]. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    /* Overflow: operands differ in sign and the result's sign != a's. */
    if (((a ^ b) & 0x8000) && ((diff ^ a) & 0x8000)) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
4370
/* 8-bit signed saturating subtraction: clamps to [0x80, 0x7f]. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    /* Overflow: operands differ in sign and the result's sign != a's. */
    if (((a ^ b) & 0x80) && ((diff ^ a) & 0x80)) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
4385
/* Instantiate the q-prefixed (signed saturating) parallel add/sub
 * helpers from the op_addsub.h template using the *_sat primitives.
 */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
4393
4394/* Unsigned saturating arithmetic. */
/* 16-bit unsigned saturating addition: clamps to 0xffff on wrap. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;
    return sum < a ? 0xffff : sum;
}
4403
/* 16-bit unsigned saturating subtraction: clamps to 0 on underflow. */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return a > b ? a - b : 0;
}
4411
/* 8-bit unsigned saturating addition: clamps to 0xff on wrap. */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;
    return sum < a ? 0xff : sum;
}
4420
/* 8-bit unsigned saturating subtraction: clamps to 0 on underflow. */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return a > b ? a - b : 0;
}
4428
/* Instantiate the uq-prefixed (unsigned saturating) parallel add/sub
 * helpers from the op_addsub.h template using the *_usat primitives.
 */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
4436
/* Signed modulo arithmetic. */
/* Lane ops for the s-prefixed helpers: compute in 32-bit so the true
 * (non-wrapped) sign is known, store the truncated lane via RESULT,
 * and set the APSR.GE bits for the lane when the result is >= 0.
 */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
4463
/* Unsigned modulo arithmetic. */
/* Lane ops for the u-prefixed helpers: compute in 32-bit so bit 16/8
 * of the widened result holds the carry (for ADD) or the no-borrow
 * indicator (for SUB), which drives the APSR.GE bits for the lane.
 */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
4501
/* Halved signed arithmetic. */
/* sh/uh-prefixed helpers: widen, add/subtract, then >> 1 to halve.
 * For the signed variants this relies on >> of a negative value being
 * an arithmetic shift (true for the compilers QEMU supports).
 */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic. */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
4527
/* Absolute difference of two unsigned bytes. */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return a > b ? a - b : b - a;
}
4535
4536/* Unsigned sum of absolute byte differences. */
4537uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
4538{
4539 uint32_t sum;
4540 sum = do_usad(a, b);
4541 sum += do_usad(a >> 8, b >> 8);
4542 sum += do_usad(a >> 16, b >>16);
4543 sum += do_usad(a >> 24, b >> 24);
4544 return sum;
4545}
4546
4547/* For ARMv6 SEL instruction. */
4548uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
4549{
4550 uint32_t mask;
4551
4552 mask = 0;
4553 if (flags & 1)
4554 mask |= 0xff;
4555 if (flags & 2)
4556 mask |= 0xff00;
4557 if (flags & 4)
4558 mask |= 0xff0000;
4559 if (flags & 8)
4560 mask |= 0xff000000;
4561 return (a & mask) | (b & ~mask);
4562}
4563
b90372ad
PM
4564/* VFP support. We follow the convention used for VFP instructions:
4565 Single precision routines have a "s" suffix, double precision a
4373f3ce
PB
4566 "d" suffix. */
4567
4568/* Convert host exception flags to vfp form. */
4569static inline int vfp_exceptbits_from_host(int host_bits)
4570{
4571 int target_bits = 0;
4572
4573 if (host_bits & float_flag_invalid)
4574 target_bits |= 1;
4575 if (host_bits & float_flag_divbyzero)
4576 target_bits |= 2;
4577 if (host_bits & float_flag_overflow)
4578 target_bits |= 4;
36802b6b 4579 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
4373f3ce
PB
4580 target_bits |= 8;
4581 if (host_bits & float_flag_inexact)
4582 target_bits |= 0x10;
cecd8504
PM
4583 if (host_bits & float_flag_input_denormal)
4584 target_bits |= 0x80;
4373f3ce
PB
4585 return target_bits;
4586}
4587
/* Assemble the FPSCR value from its scattered storage: the xregs copy
 * (with the LEN and STRIDE fields masked out), the cached
 * vec_len/vec_stride fields, and the exception flags accumulated in
 * both softfloat status structures.
 */
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}
4601
/* Non-helper entry point so other QEMU code can read the FPSCR. */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
4606
4373f3ce
PB
4607/* Convert vfp exception flags to target form. */
4608static inline int vfp_exceptbits_to_host(int target_bits)
4609{
4610 int host_bits = 0;
4611
4612 if (target_bits & 1)
4613 host_bits |= float_flag_invalid;
4614 if (target_bits & 2)
4615 host_bits |= float_flag_divbyzero;
4616 if (target_bits & 4)
4617 host_bits |= float_flag_overflow;
4618 if (target_bits & 8)
4619 host_bits |= float_flag_underflow;
4620 if (target_bits & 0x10)
4621 host_bits |= float_flag_inexact;
cecd8504
PM
4622 if (target_bits & 0x80)
4623 host_bits |= float_flag_input_denormal;
4373f3ce
PB
4624 return host_bits;
4625}
4626
/* Write the FPSCR: store the raw value (minus the LEN/STRIDE fields,
 * which are cached separately), and push any changed rounding-mode,
 * flush-to-zero and default-NaN settings into the softfloat status.
 * The exception flags are reloaded from @val; the standard (Neon)
 * status flags are always cleared.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /* Only touch the softfloat state for fields that actually changed. */
    changed ^= val;
    if (changed & (3 << 22)) {
        /* Map the FPSCR RMode field to the softfloat rounding mode. */
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        /* FZ bit controls both output and input flushing. */
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
4667
/* Non-helper entry point so other QEMU code can write the FPSCR. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
4672
4373f3ce
PB
/* VFP_HELPER(name, p) builds a helper symbol like helper_vfp_adds.
 * VFP_BINOP instantiates single- and double-precision binary ops that
 * take an explicit float_status pointer rather than using env.
 */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
4695
/* Negate and absolute value for single and double precision; these
 * just manipulate the sign bit and take no float_status argument.
 */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}
4715
/* Square root using the VFP fp_status (rounding mode, flags, etc). */
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
4725
/* XXX: check quiet/signaling case */
/* VFP compare helpers: cmp uses the quiet compare, cmpe ("E" suffix)
 * the signaling one. The {N,Z,C,V} result nibble is written into
 * FPSCR[31:28]; unordered (NaN) results map to flags 0x3.
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
4755
/* Integer to float and float to integer conversions */

/* CONV_ITOF: (u)int32 -> float of size fsz, via an explicit fpst. */
#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

/* CONV_FTOI: float -> (u)int32; NaN raises Invalid and returns 0
 * rather than the softfloat default result.
 */
#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

/* For each (name, precision, sign): one int->float helper plus
 * current-rounding and round-to-zero float->int helpers.
 */
#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
4373f3ce
PB
4789
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
4808
/* VFP3 fixed point conversion. */
/* Fixed-point -> float: convert the integer then scale down by
 * 2^shift with scalbn.
 */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

/* A32 gets both round-to-zero and current-rounding float->fix
 * helpers; the A64-only sizes get just the current-rounding one.
 */
#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
4373f3ce 4868
52a1f6a3
AG
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value; the previous mode
 * is returned so the caller can restore it afterwards.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
4881
43630e58
WN
/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
4898
60011498 4899/* Half precision conversions. */
/* Half <-> single conversion cores. "ieee" is clear when FPSCR bit 26
 * (AHP, alternative half-precision) is set; in IEEE mode any NaN
 * produced must be quietened per the ARM S<->H conversion rules.
 */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
4919
/* Half <-> single wrappers: the Neon variants use the standard
 * (Neon) fp status, the VFP variants the normal VFP fp status.
 */
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
4939
8900aad2
PM
/* Half <-> double conversions; same AHP/IEEE NaN-quietening rules as
 * the half <-> single cores above, always via the VFP fp_status.
 */
float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
4959
/* Float constants for the Neon reciprocal-step helpers below. */
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
dda3ec49 4963
/* VRECPS (Newton-Raphson reciprocal step): returns 2 - a*b, with the
 * inf * 0 combination special-cased to return exactly 2.0 (raising
 * input-denormal if the zero operand was actually a denormal).
 */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}
4976
/* VRSQRTS (Newton-Raphson reciprocal-sqrt step): returns (3 - a*b)/2,
 * with the inf * 0 combination special-cased to return exactly 1.5
 * (raising input-denormal if the zero operand was actually a denormal).
 */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
4991
8f8e3aa4
PB
4992/* NEON helpers. */
4993
56bf4fe2
CL
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time. */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
/* Largest finite values, returned when an estimate must not round
 * to infinity for the current rounding mode. */
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
56bf4fe2 5000
b6d4443a
AB
/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()
 */

/* Core table-free reciprocal estimate for @a (expected to be the
 * pre-scaled value built by call_recip_estimate()); returns the
 * 8-fraction-bit estimate described by the pseudocode.
 */
static float64 recip_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
5032
b6d4443a
AB
/* Common wrapper to call recip_estimate */
/* @num is a float64-format value (possibly with a denormal-style
 * zero exponent), @off the exponent bias for the result format.
 * Scales the fraction into [0.5, 1.0), runs the estimate, then
 * rebuilds a float64-format result with exponent off - exp.
 */
static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
{
    uint64_t val64 = float64_val(num);
    uint64_t frac = extract64(val64, 0, 52);
    int64_t exp = extract64(val64, 52, 11);
    uint64_t sbit;
    float64 scaled, estimate;

    /* Generate the scaled number for the estimate function */
    if (exp == 0) {
        /* Denormal input: normalise the fraction, adjusting exp. */
        if (extract64(frac, 51, 1) == 0) {
            exp = -1;
            frac = extract64(frac, 0, 50) << 2;
        } else {
            frac = extract64(frac, 0, 51) << 1;
        }
    }

    /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
    scaled = make_float64((0x3feULL << 52)
                          | extract64(frac, 44, 8) << 44);

    estimate = recip_estimate(scaled, fpst);

    /* Build new result */
    val64 = float64_val(estimate);
    sbit = 0x8000000000000000ULL & val64;
    exp = off - exp;
    frac = extract64(val64, 0, 52);

    /* Result exponents of 0 and -1 denote a denormal result: shift
     * the fraction right and insert the implicit leading bit.
     */
    if (exp == 0) {
        frac = 1ULL << 51 | extract64(frac, 1, 51);
    } else if (exp == -1) {
        frac = 1ULL << 50 | extract64(frac, 2, 50);
        exp = 0;
    }

    return make_float64(sbit | (exp << 52) | frac);
}
5073
/* Decide whether an overflowing result of sign @sign_bit should
 * produce infinity (true) or the largest finite value (false),
 * according to the current rounding mode in @fpst.
 */
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
5089
/* FRECPE: single-precision reciprocal estimate. Handles the special
 * cases (NaN, infinity, zero, tiny and near-overflow inputs) itself
 * and delegates the numeric estimate to call_recip_estimate().
 */
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000ULL & f32_val;
    int32_t f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    float64 f64, r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        /* Signaling NaNs raise Invalid and are quietened; default-NaN
         * mode replaces any NaN with the default NaN.
         */
        if (float32_is_signaling_nan(f32)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(f32);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        /* Reciprocal would overflow: round to +-inf or +-maxnorm
         * depending on the rounding mode.
         */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sbit)) {
            return float32_set_sign(float32_infinity, float32_is_neg(f32));
        } else {
            return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }


    /* Widen to float64 format and run the common estimate. */
    f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
    r64 = call_recip_estimate(f64, 253, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<7:0> : fraction<51:29>; */
    return make_float32(f32_sbit |
                        (r64_exp & 0xff) << 23 |
                        extract64(r64_frac, 29, 24));
}
5143
/* Double-precision reciprocal estimate.
 * Mirrors recpe_f32: handle the special cases here, then defer to
 * call_recip_estimate() and repack the result with the input's sign.
 */
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
    int64_t f64_exp = extract64(f64_val, 52, 11);
    float64 r64;
    uint64_t r64_val;
    int64_t r64_exp;
    uint64_t r64_frac;

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64)) {
            /* Signaling NaNs raise Invalid and are quietened */
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(f64);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        /* 1/Inf -> zero of the same sign */
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        /* 1/0 -> infinity of the same sign, raising Divide-by-zero */
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        /* Result would overflow: saturate to infinity or max normal
         * depending on the rounding mode (see round_to_inf()).
         */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sbit)) {
            return float64_set_sign(float64_infinity, float64_is_neg(f64));
        } else {
            return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
        }
    } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
        /* Result would be denormal and we are flushing: return signed zero */
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    r64 = call_recip_estimate(f64, 2045, fpst);
    r64_val = float64_val(r64);
    r64_exp = extract64(r64_val, 52, 11);
    r64_frac = extract64(r64_val, 0, 52);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    return make_float64(f64_sbit |
                        ((r64_exp & 0x7ff) << 52) |
                        r64_frac);
}
5195
e07be5d2
CL
5196/* The algorithm that must be used to calculate the estimate
5197 * is specified by the ARM ARM.
5198 */
c2fb418e 5199static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
e07be5d2 5200{
1146a817
PM
5201 /* These calculations mustn't set any fp exception flags,
5202 * so we use a local copy of the fp_status.
5203 */
c2fb418e 5204 float_status dummy_status = *real_fp_status;
1146a817 5205 float_status *s = &dummy_status;
e07be5d2
CL
5206 float64 q;
5207 int64_t q_int;
5208
5209 if (float64_lt(a, float64_half, s)) {
5210 /* range 0.25 <= a < 0.5 */
5211
5212 /* a in units of 1/512 rounded down */
5213 /* q0 = (int)(a * 512.0); */
5214 q = float64_mul(float64_512, a, s);
5215 q_int = float64_to_int64_round_to_zero(q, s);
5216
5217 /* reciprocal root r */
5218 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
5219 q = int64_to_float64(q_int, s);
5220 q = float64_add(q, float64_half, s);
5221 q = float64_div(q, float64_512, s);
5222 q = float64_sqrt(q, s);
5223 q = float64_div(float64_one, q, s);
5224 } else {
5225 /* range 0.5 <= a < 1.0 */
5226
5227 /* a in units of 1/256 rounded down */
5228 /* q1 = (int)(a * 256.0); */
5229 q = float64_mul(float64_256, a, s);
5230 int64_t q_int = float64_to_int64_round_to_zero(q, s);
5231
5232 /* reciprocal root r */
5233 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
5234 q = int64_to_float64(q_int, s);
5235 q = float64_add(q, float64_half, s);
5236 q = float64_div(q, float64_256, s);
5237 q = float64_sqrt(q, s);
5238 q = float64_div(float64_one, q, s);
5239 }
5240 /* r in units of 1/256 rounded to nearest */
5241 /* s = (int)(256.0 * r + 0.5); */
5242
5243 q = float64_mul(q, float64_256,s );
5244 q = float64_add(q, float64_half, s);
5245 q_int = float64_to_int64_round_to_zero(q, s);
5246
5247 /* return (double)s / 256.0;*/
5248 return float64_div(int64_to_float64(q_int, s), float64_256, s);
5249}
5250
/* Single-precision reciprocal square root estimate.
 * Handles the special cases, then rescales the operand into
 * [0.25, 1.0) as a double (preserving exponent parity) and defers
 * to recip_sqrt_estimate().
 */
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000 & val;
    int32_t f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;
    uint64_t val64;
    int result_exp;
    float64 f64;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32)) {
            /* Signaling NaNs raise Invalid and are quietened */
            float_raise(float_flag_invalid, s);
            nan = float32_maybe_silence_nan(f32);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        /* 1/sqrt(0) -> infinity of the same sign, raising Divide-by-zero */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        /* Square root of a negative number: Invalid, default NaN */
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(f32)) {
        /* 1/sqrt(+Inf) -> +0 */
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    f64_frac = ((uint64_t) f32_frac) << 29;
    if (f32_exp == 0) {
        /* Denormal input: shift the fraction up until it is normalized,
         * adjusting the (biased) exponent to match.
         */
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f32_exp = f32_exp-1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    /* Even exponent -> [0.5, 1.0); odd exponent -> [0.25, 0.5) */
    if (extract64(f32_exp, 0, 1) == 0) {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (380 - f32_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    val64 = float64_val(f64);

    /* Repack: result_exp<7:0> : estimate fraction<51:29> */
    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
5316
c2fb418e
AB
/* Double-precision reciprocal square root estimate.
 * Mirrors rsqrte_f32: special cases first, then rescale into
 * [0.25, 1.0) preserving exponent parity and defer to
 * recip_sqrt_estimate().
 */
float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & val;
    int64_t f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);
    int64_t result_exp;
    uint64_t result_frac;

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64)) {
            /* Signaling NaNs raise Invalid and are quietened */
            float_raise(float_flag_invalid, s);
            nan = float64_maybe_silence_nan(f64);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        /* 1/sqrt(0) -> infinity of the same sign, raising Divide-by-zero */
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        /* Square root of a negative number: Invalid, default NaN */
        float_raise(float_flag_invalid, s);
        return float64_default_nan;
    } else if (float64_is_infinity(f64)) {
        /* 1/sqrt(+Inf) -> +0 */
        return float64_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */

    if (f64_exp == 0) {
        /* Denormal input: shift the fraction up until it is normalized,
         * adjusting the (biased) exponent to match.
         */
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f64_exp = f64_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    /* Even exponent -> [0.5, 1.0); odd exponent -> [0.25, 0.5) */
    if (extract64(f64_exp, 0, 1) == 0) {
        f64 = make_float64(f64_sbit
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(f64_sbit
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (3068 - f64_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    result_frac = extract64(float64_val(f64), 0, 52);

    /* Repack: sign : result_exp<10:0> : estimate fraction<51:0> */
    return make_float64(f64_sbit |
                        ((result_exp & 0x7ff) << 52) |
                        result_frac);
}
5379
b6d4443a 5380uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
4373f3ce 5381{
b6d4443a 5382 float_status *s = fpstp;
fe0e4872
CL
5383 float64 f64;
5384
5385 if ((a & 0x80000000) == 0) {
5386 return 0xffffffff;
5387 }
5388
5389 f64 = make_float64((0x3feULL << 52)
5390 | ((int64_t)(a & 0x7fffffff) << 21));
5391
b6d4443a 5392 f64 = recip_estimate(f64, s);
fe0e4872
CL
5393
5394 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
4373f3ce
PB
5395}
5396
c2fb418e 5397uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
4373f3ce 5398{
c2fb418e 5399 float_status *fpst = fpstp;
e07be5d2
CL
5400 float64 f64;
5401
5402 if ((a & 0xc0000000) == 0) {
5403 return 0xffffffff;
5404 }
5405
5406 if (a & 0x80000000) {
5407 f64 = make_float64((0x3feULL << 52)
5408 | ((uint64_t)(a & 0x7fffffff) << 21));
5409 } else { /* bits 31-30 == '01' */
5410 f64 = make_float64((0x3fdULL << 52)
5411 | ((uint64_t)(a & 0x3fffffff) << 22));
5412 }
5413
c2fb418e 5414 f64 = recip_sqrt_estimate(f64, fpst);
e07be5d2
CL
5415
5416 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
4373f3ce 5417}
fe1479c3 5418
da97f52c
PM
5419/* VFPv4 fused multiply-accumulate */
5420float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
5421{
5422 float_status *fpst = fpstp;
5423 return float32_muladd(a, b, c, 0, fpst);
5424}
5425
5426float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
5427{
5428 float_status *fpst = fpstp;
5429 return float64_muladd(a, b, c, 0, fpst);
5430}
d9b0848d
PM
5431
5432/* ARMv8 round to integral */
5433float32 HELPER(rints_exact)(float32 x, void *fp_status)
5434{
5435 return float32_round_to_int(x, fp_status);
5436}
5437
5438float64 HELPER(rintd_exact)(float64 x, void *fp_status)
5439{
5440 return float64_round_to_int(x, fp_status);
5441}
5442
5443float32 HELPER(rints)(float32 x, void *fp_status)
5444{
5445 int old_flags = get_float_exception_flags(fp_status), new_flags;
5446 float32 ret;
5447
5448 ret = float32_round_to_int(x, fp_status);
5449
5450 /* Suppress any inexact exceptions the conversion produced */
5451 if (!(old_flags & float_flag_inexact)) {
5452 new_flags = get_float_exception_flags(fp_status);
5453 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
5454 }
5455
5456 return ret;
5457}
5458
5459float64 HELPER(rintd)(float64 x, void *fp_status)
5460{
5461 int old_flags = get_float_exception_flags(fp_status), new_flags;
5462 float64 ret;
5463
5464 ret = float64_round_to_int(x, fp_status);
5465
5466 new_flags = get_float_exception_flags(fp_status);
5467
5468 /* Suppress any inexact exceptions the conversion produced */
5469 if (!(old_flags & float_flag_inexact)) {
5470 new_flags = get_float_exception_flags(fp_status);
5471 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
5472 }
5473
5474 return ret;
5475}
9972da66
WN
5476
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for FPROUNDING_ODD (round-to-odd);
         * until then it is logged and treated as round-to-nearest-even.
         */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
eb0ecd5a
WN
5504
5505static void crc_init_buffer(uint8_t *buf, uint32_t val, uint32_t bytes)
5506{
5507 memset(buf, 0, 4);
5508
5509 if (bytes == 1) {
5510 buf[0] = val & 0xff;
5511 } else if (bytes == 2) {
5512 buf[0] = val & 0xff;
5513 buf[1] = (val >> 8) & 0xff;
5514 } else {
5515 buf[0] = val & 0xff;
5516 buf[1] = (val >> 8) & 0xff;
5517 buf[2] = (val >> 16) & 0xff;
5518 buf[3] = (val >> 24) & 0xff;
5519 }
5520}
5521
5522uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
5523{
5524 uint8_t buf[4];
5525
5526 crc_init_buffer(buf, val, bytes);
5527
5528 /* zlib crc32 converts the accumulator and output to one's complement. */
5529 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
5530}
5531
5532uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
5533{
5534 uint8_t buf[4];
5535
5536 crc_init_buffer(buf, val, bytes);
5537
5538 /* Linux crc32c converts the output to one's complement. */
5539 return crc32c(acc, buf, bytes) ^ 0xffffffff;
5540}