2 #include "exec/gdbstub.h"
4 #include "qemu/host-utils.h"
5 #include "sysemu/sysemu.h"
6 #include "qemu/bitops.h"
8 #ifndef CONFIG_USER_ONLY
9 static inline int get_phys_addr(CPUARMState
*env
, uint32_t address
,
10 int access_type
, int is_user
,
11 hwaddr
*phys_ptr
, int *prot
,
12 target_ulong
*page_size
);
15 static int vfp_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
19 /* VFP data registers are always little-endian. */
20 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ? 32 : 16;
22 stfq_le_p(buf
, env
->vfp
.regs
[reg
]);
25 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
26 /* Aliases for Q regs. */
29 stfq_le_p(buf
, env
->vfp
.regs
[(reg
- 32) * 2]);
30 stfq_le_p(buf
+ 8, env
->vfp
.regs
[(reg
- 32) * 2 + 1]);
34 switch (reg
- nregs
) {
35 case 0: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSID
]); return 4;
36 case 1: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSCR
]); return 4;
37 case 2: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPEXC
]); return 4;
42 static int vfp_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
46 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ? 32 : 16;
48 env
->vfp
.regs
[reg
] = ldfq_le_p(buf
);
51 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
54 env
->vfp
.regs
[(reg
- 32) * 2] = ldfq_le_p(buf
);
55 env
->vfp
.regs
[(reg
- 32) * 2 + 1] = ldfq_le_p(buf
+ 8);
59 switch (reg
- nregs
) {
60 case 0: env
->vfp
.xregs
[ARM_VFP_FPSID
] = ldl_p(buf
); return 4;
61 case 1: env
->vfp
.xregs
[ARM_VFP_FPSCR
] = ldl_p(buf
); return 4;
62 case 2: env
->vfp
.xregs
[ARM_VFP_FPEXC
] = ldl_p(buf
) & (1 << 30); return 4;
67 static int raw_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
70 *value
= CPREG_FIELD32(env
, ri
);
74 static int raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
77 CPREG_FIELD32(env
, ri
) = value
;
81 static bool read_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
84 /* Raw read of a coprocessor register (as needed for migration, etc)
85 * return true on success, false if the read is impossible for some reason.
87 if (ri
->type
& ARM_CP_CONST
) {
89 } else if (ri
->raw_readfn
) {
90 return (ri
->raw_readfn(env
, ri
, v
) == 0);
91 } else if (ri
->readfn
) {
92 return (ri
->readfn(env
, ri
, v
) == 0);
94 if (ri
->type
& ARM_CP_64BIT
) {
95 *v
= CPREG_FIELD64(env
, ri
);
97 *v
= CPREG_FIELD32(env
, ri
);
103 static bool write_raw_cp_reg(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
106 /* Raw write of a coprocessor register (as needed for migration, etc).
107 * Return true on success, false if the write is impossible for some reason.
108 * Note that constant registers are treated as write-ignored; the
109 * caller should check for success by whether a readback gives the
112 if (ri
->type
& ARM_CP_CONST
) {
114 } else if (ri
->raw_writefn
) {
115 return (ri
->raw_writefn(env
, ri
, v
) == 0);
116 } else if (ri
->writefn
) {
117 return (ri
->writefn(env
, ri
, v
) == 0);
119 if (ri
->type
& ARM_CP_64BIT
) {
120 CPREG_FIELD64(env
, ri
) = v
;
122 CPREG_FIELD32(env
, ri
) = v
;
128 bool write_cpustate_to_list(ARMCPU
*cpu
)
130 /* Write the coprocessor state from cpu->env to the (index,value) list. */
134 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
135 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
136 const ARMCPRegInfo
*ri
;
138 ri
= get_arm_cp_reginfo(cpu
, regidx
);
143 if (ri
->type
& ARM_CP_NO_MIGRATE
) {
146 if (!read_raw_cp_reg(&cpu
->env
, ri
, &v
)) {
150 cpu
->cpreg_values
[i
] = v
;
155 bool write_list_to_cpustate(ARMCPU
*cpu
)
160 for (i
= 0; i
< cpu
->cpreg_array_len
; i
++) {
161 uint32_t regidx
= kvm_to_cpreg_id(cpu
->cpreg_indexes
[i
]);
162 uint64_t v
= cpu
->cpreg_values
[i
];
164 const ARMCPRegInfo
*ri
;
166 ri
= get_arm_cp_reginfo(cpu
, regidx
);
171 if (ri
->type
& ARM_CP_NO_MIGRATE
) {
174 /* Write value and confirm it reads back as written
175 * (to catch read-only registers and partially read-only
176 * registers where the incoming migration value doesn't match)
178 if (!write_raw_cp_reg(&cpu
->env
, ri
, v
) ||
179 !read_raw_cp_reg(&cpu
->env
, ri
, &readback
) ||
187 static void add_cpreg_to_list(gpointer key
, gpointer opaque
)
189 ARMCPU
*cpu
= opaque
;
191 const ARMCPRegInfo
*ri
;
193 regidx
= *(uint32_t *)key
;
194 ri
= get_arm_cp_reginfo(cpu
, regidx
);
196 if (!(ri
->type
& ARM_CP_NO_MIGRATE
)) {
197 cpu
->cpreg_indexes
[cpu
->cpreg_array_len
] = cpreg_to_kvm_id(regidx
);
198 /* The value array need not be initialized at this point */
199 cpu
->cpreg_array_len
++;
203 static void count_cpreg(gpointer key
, gpointer opaque
)
205 ARMCPU
*cpu
= opaque
;
207 const ARMCPRegInfo
*ri
;
209 regidx
= *(uint32_t *)key
;
210 ri
= get_arm_cp_reginfo(cpu
, regidx
);
212 if (!(ri
->type
& ARM_CP_NO_MIGRATE
)) {
213 cpu
->cpreg_array_len
++;
217 static gint
cpreg_key_compare(gconstpointer a
, gconstpointer b
)
219 uint32_t aidx
= *(uint32_t *)a
;
220 uint32_t bidx
= *(uint32_t *)b
;
225 static void cpreg_make_keylist(gpointer key
, gpointer value
, gpointer udata
)
227 GList
**plist
= udata
;
229 *plist
= g_list_prepend(*plist
, key
);
232 void init_cpreg_list(ARMCPU
*cpu
)
234 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
235 * Note that we require cpreg_tuples[] to be sorted by key ID.
240 g_hash_table_foreach(cpu
->cp_regs
, cpreg_make_keylist
, &keys
);
242 keys
= g_list_sort(keys
, cpreg_key_compare
);
244 cpu
->cpreg_array_len
= 0;
246 g_list_foreach(keys
, count_cpreg
, cpu
);
248 arraylen
= cpu
->cpreg_array_len
;
249 cpu
->cpreg_indexes
= g_new(uint64_t, arraylen
);
250 cpu
->cpreg_values
= g_new(uint64_t, arraylen
);
251 cpu
->cpreg_vmstate_indexes
= g_new(uint64_t, arraylen
);
252 cpu
->cpreg_vmstate_values
= g_new(uint64_t, arraylen
);
253 cpu
->cpreg_vmstate_array_len
= cpu
->cpreg_array_len
;
254 cpu
->cpreg_array_len
= 0;
256 g_list_foreach(keys
, add_cpreg_to_list
, cpu
);
258 assert(cpu
->cpreg_array_len
== arraylen
);
263 static int dacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
265 env
->cp15
.c3
= value
;
266 tlb_flush(env
, 1); /* Flush TLB as domain not tracked in TLB */
270 static int fcse_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
272 if (env
->cp15
.c13_fcse
!= value
) {
273 /* Unlike real hardware the qemu TLB uses virtual addresses,
274 * not modified virtual addresses, so this causes a TLB flush.
277 env
->cp15
.c13_fcse
= value
;
281 static int contextidr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
284 if (env
->cp15
.c13_context
!= value
&& !arm_feature(env
, ARM_FEATURE_MPU
)) {
285 /* For VMSA (when not using the LPAE long descriptor page table
286 * format) this register includes the ASID, so do a TLB flush.
287 * For PMSA it is purely a process ID and no action is needed.
291 env
->cp15
.c13_context
= value
;
295 static int tlbiall_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
298 /* Invalidate all (TLBIALL) */
303 static int tlbimva_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
306 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
307 tlb_flush_page(env
, value
& TARGET_PAGE_MASK
);
311 static int tlbiasid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
314 /* Invalidate by ASID (TLBIASID) */
315 tlb_flush(env
, value
== 0);
319 static int tlbimvaa_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
322 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
323 tlb_flush_page(env
, value
& TARGET_PAGE_MASK
);
327 static const ARMCPRegInfo cp_reginfo
[] = {
328 /* DBGDIDR: just RAZ. In particular this means the "debug architecture
329 * version" bits will read as a reserved value, which should cause
330 * Linux to not try to use the debug hardware.
332 { .name
= "DBGDIDR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 0,
333 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
334 /* MMU Domain access control / MPU write buffer control */
335 { .name
= "DACR", .cp
= 15,
336 .crn
= 3, .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
337 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c3
),
338 .resetvalue
= 0, .writefn
= dacr_write
, .raw_writefn
= raw_write
, },
339 { .name
= "FCSEIDR", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 0,
340 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c13_fcse
),
341 .resetvalue
= 0, .writefn
= fcse_write
, .raw_writefn
= raw_write
, },
342 { .name
= "CONTEXTIDR", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 1,
343 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c13_fcse
),
344 .resetvalue
= 0, .writefn
= contextidr_write
, .raw_writefn
= raw_write
, },
345 /* ??? This covers not just the impdef TLB lockdown registers but also
346 * some v7VMSA registers relating to TEX remap, so it is overly broad.
348 { .name
= "TLB_LOCKDOWN", .cp
= 15, .crn
= 10, .crm
= CP_ANY
,
349 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_NOP
},
350 /* MMU TLB control. Note that the wildcarding means we cover not just
351 * the unified TLB ops but also the dside/iside/inner-shareable variants.
353 { .name
= "TLBIALL", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
354 .opc1
= CP_ANY
, .opc2
= 0, .access
= PL1_W
, .writefn
= tlbiall_write
,
355 .type
= ARM_CP_NO_MIGRATE
},
356 { .name
= "TLBIMVA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
357 .opc1
= CP_ANY
, .opc2
= 1, .access
= PL1_W
, .writefn
= tlbimva_write
,
358 .type
= ARM_CP_NO_MIGRATE
},
359 { .name
= "TLBIASID", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
360 .opc1
= CP_ANY
, .opc2
= 2, .access
= PL1_W
, .writefn
= tlbiasid_write
,
361 .type
= ARM_CP_NO_MIGRATE
},
362 { .name
= "TLBIMVAA", .cp
= 15, .crn
= 8, .crm
= CP_ANY
,
363 .opc1
= CP_ANY
, .opc2
= 3, .access
= PL1_W
, .writefn
= tlbimvaa_write
,
364 .type
= ARM_CP_NO_MIGRATE
},
365 /* Cache maintenance ops; some of this space may be overridden later. */
366 { .name
= "CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
367 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
368 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
},
372 static const ARMCPRegInfo not_v6_cp_reginfo
[] = {
373 /* Not all pre-v6 cores implemented this WFI, so this is slightly
376 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
377 .access
= PL1_W
, .type
= ARM_CP_WFI
},
381 static const ARMCPRegInfo not_v7_cp_reginfo
[] = {
382 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
383 * is UNPREDICTABLE; we choose to NOP as most implementations do).
385 { .name
= "WFI_v6", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
386 .access
= PL1_W
, .type
= ARM_CP_WFI
},
387 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
388 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
389 * OMAPCP will override this space.
391 { .name
= "DLOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 0,
392 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_data
),
394 { .name
= "ILOCKDOWN", .cp
= 15, .crn
= 9, .crm
= 0, .opc1
= 0, .opc2
= 1,
395 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_insn
),
397 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
398 { .name
= "DUMMY", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= CP_ANY
,
399 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
404 static int cpacr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
406 if (env
->cp15
.c1_coproc
!= value
) {
407 env
->cp15
.c1_coproc
= value
;
408 /* ??? Is this safe when called from within a TB? */
414 static const ARMCPRegInfo v6_cp_reginfo
[] = {
415 /* prefetch by MVA in v6, NOP in v7 */
416 { .name
= "MVA_prefetch",
417 .cp
= 15, .crn
= 7, .crm
= 13, .opc1
= 0, .opc2
= 1,
418 .access
= PL1_W
, .type
= ARM_CP_NOP
},
419 { .name
= "ISB", .cp
= 15, .crn
= 7, .crm
= 5, .opc1
= 0, .opc2
= 4,
420 .access
= PL0_W
, .type
= ARM_CP_NOP
},
421 { .name
= "DSB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 4,
422 .access
= PL0_W
, .type
= ARM_CP_NOP
},
423 { .name
= "DMB", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 5,
424 .access
= PL0_W
, .type
= ARM_CP_NOP
},
425 { .name
= "IFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 2,
426 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_insn
),
428 /* Watchpoint Fault Address Register : should actually only be present
429 * for 1136, 1176, 11MPCore.
431 { .name
= "WFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 1,
432 .access
= PL1_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0, },
433 { .name
= "CPACR", .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 2,
434 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_coproc
),
435 .resetvalue
= 0, .writefn
= cpacr_write
},
440 static int pmreg_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
443 /* Generic performance monitor register read function for where
444 * user access may be allowed by PMUSERENR.
446 if (arm_current_pl(env
) == 0 && !env
->cp15
.c9_pmuserenr
) {
449 *value
= CPREG_FIELD32(env
, ri
);
453 static int pmcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
456 if (arm_current_pl(env
) == 0 && !env
->cp15
.c9_pmuserenr
) {
459 /* only the DP, X, D and E bits are writable */
460 env
->cp15
.c9_pmcr
&= ~0x39;
461 env
->cp15
.c9_pmcr
|= (value
& 0x39);
465 static int pmcntenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
468 if (arm_current_pl(env
) == 0 && !env
->cp15
.c9_pmuserenr
) {
472 env
->cp15
.c9_pmcnten
|= value
;
476 static int pmcntenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
479 if (arm_current_pl(env
) == 0 && !env
->cp15
.c9_pmuserenr
) {
483 env
->cp15
.c9_pmcnten
&= ~value
;
487 static int pmovsr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
490 if (arm_current_pl(env
) == 0 && !env
->cp15
.c9_pmuserenr
) {
493 env
->cp15
.c9_pmovsr
&= ~value
;
497 static int pmxevtyper_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
500 if (arm_current_pl(env
) == 0 && !env
->cp15
.c9_pmuserenr
) {
503 env
->cp15
.c9_pmxevtyper
= value
& 0xff;
507 static int pmuserenr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
510 env
->cp15
.c9_pmuserenr
= value
& 1;
514 static int pmintenset_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
517 /* We have no event counters so only the C bit can be changed */
519 env
->cp15
.c9_pminten
|= value
;
523 static int pmintenclr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
527 env
->cp15
.c9_pminten
&= ~value
;
531 static int ccsidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
534 ARMCPU
*cpu
= arm_env_get_cpu(env
);
535 *value
= cpu
->ccsidr
[env
->cp15
.c0_cssel
];
539 static int csselr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
542 env
->cp15
.c0_cssel
= value
& 0xf;
546 static const ARMCPRegInfo v7_cp_reginfo
[] = {
547 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
550 { .name
= "DBGDRAR", .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 0,
551 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
552 { .name
= "DBGDSAR", .cp
= 14, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
553 .access
= PL0_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
554 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
555 { .name
= "NOP", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
556 .access
= PL1_W
, .type
= ARM_CP_NOP
},
557 /* Performance monitors are implementation defined in v7,
558 * but with an ARM recommended set of registers, which we
559 * follow (although we don't actually implement any counters)
561 * Performance registers fall into three categories:
562 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
563 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
564 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
565 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
566 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
568 { .name
= "PMCNTENSET", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 1,
569 .access
= PL0_RW
, .resetvalue
= 0,
570 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
),
571 .readfn
= pmreg_read
, .writefn
= pmcntenset_write
,
572 .raw_readfn
= raw_read
, .raw_writefn
= raw_write
},
573 { .name
= "PMCNTENCLR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 2,
574 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
),
575 .readfn
= pmreg_read
, .writefn
= pmcntenclr_write
,
576 .type
= ARM_CP_NO_MIGRATE
},
577 { .name
= "PMOVSR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 3,
578 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmovsr
),
579 .readfn
= pmreg_read
, .writefn
= pmovsr_write
,
580 .raw_readfn
= raw_read
, .raw_writefn
= raw_write
},
581 /* Unimplemented so WI. Strictly speaking write accesses in PL0 should
584 { .name
= "PMSWINC", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 4,
585 .access
= PL0_W
, .type
= ARM_CP_NOP
},
586 /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
587 * We choose to RAZ/WI. XXX should respect PMUSERENR.
589 { .name
= "PMSELR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 5,
590 .access
= PL0_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
591 /* Unimplemented, RAZ/WI. XXX PMUSERENR */
592 { .name
= "PMCCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 0,
593 .access
= PL0_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
594 { .name
= "PMXEVTYPER", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 1,
596 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmxevtyper
),
597 .readfn
= pmreg_read
, .writefn
= pmxevtyper_write
,
598 .raw_readfn
= raw_read
, .raw_writefn
= raw_write
},
599 /* Unimplemented, RAZ/WI. XXX PMUSERENR */
600 { .name
= "PMXEVCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 2,
601 .access
= PL0_RW
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
602 { .name
= "PMUSERENR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 0,
603 .access
= PL0_R
| PL1_RW
,
604 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmuserenr
),
606 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
607 { .name
= "PMINTENSET", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 1,
609 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
611 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
},
612 { .name
= "PMINTENCLR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 2,
613 .access
= PL1_RW
, .type
= ARM_CP_NO_MIGRATE
,
614 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
615 .resetvalue
= 0, .writefn
= pmintenclr_write
, },
616 { .name
= "SCR", .cp
= 15, .crn
= 1, .crm
= 1, .opc1
= 0, .opc2
= 0,
617 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_scr
),
619 { .name
= "CCSIDR", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 0,
620 .access
= PL1_R
, .readfn
= ccsidr_read
, .type
= ARM_CP_NO_MIGRATE
},
621 { .name
= "CSSELR", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 2, .opc2
= 0,
622 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c0_cssel
),
623 .writefn
= csselr_write
, .resetvalue
= 0 },
624 /* Auxiliary ID register: this actually has an IMPDEF value but for now
625 * just RAZ for all cores:
627 { .name
= "AIDR", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 7,
628 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
632 static int teecr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
639 static int teehbr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
642 /* This is a helper function because the user access rights
643 * depend on the value of the TEECR.
645 if (arm_current_pl(env
) == 0 && (env
->teecr
& 1)) {
648 *value
= env
->teehbr
;
652 static int teehbr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
655 if (arm_current_pl(env
) == 0 && (env
->teecr
& 1)) {
662 static const ARMCPRegInfo t2ee_cp_reginfo
[] = {
663 { .name
= "TEECR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 6, .opc2
= 0,
664 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, teecr
),
666 .writefn
= teecr_write
},
667 { .name
= "TEEHBR", .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 6, .opc2
= 0,
668 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, teehbr
),
669 .resetvalue
= 0, .raw_readfn
= raw_read
, .raw_writefn
= raw_write
,
670 .readfn
= teehbr_read
, .writefn
= teehbr_write
},
674 static const ARMCPRegInfo v6k_cp_reginfo
[] = {
675 { .name
= "TPIDRURW", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 2,
677 .fieldoffset
= offsetof(CPUARMState
, cp15
.c13_tls1
),
679 { .name
= "TPIDRURO", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 3,
680 .access
= PL0_R
|PL1_W
,
681 .fieldoffset
= offsetof(CPUARMState
, cp15
.c13_tls2
),
683 { .name
= "TPIDRPRW", .cp
= 15, .crn
= 13, .crm
= 0, .opc1
= 0, .opc2
= 4,
685 .fieldoffset
= offsetof(CPUARMState
, cp15
.c13_tls3
),
690 static const ARMCPRegInfo generic_timer_cp_reginfo
[] = {
691 /* Dummy implementation: RAZ/WI the whole crn=14 space */
692 { .name
= "GENERIC_TIMER", .cp
= 15, .crn
= 14,
693 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
694 .access
= PL1_RW
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
699 static int par_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
701 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
702 env
->cp15
.c7_par
= value
;
703 } else if (arm_feature(env
, ARM_FEATURE_V7
)) {
704 env
->cp15
.c7_par
= value
& 0xfffff6ff;
706 env
->cp15
.c7_par
= value
& 0xfffff1ff;
711 #ifndef CONFIG_USER_ONLY
712 /* get_phys_addr() isn't present for user-mode-only targets */
714 /* Return true if extended addresses are enabled, ie this is an
715 * LPAE implementation and we are using the long-descriptor translation
716 * table format because the TTBCR EAE bit is set.
718 static inline bool extended_addresses_enabled(CPUARMState
*env
)
720 return arm_feature(env
, ARM_FEATURE_LPAE
)
721 && (env
->cp15
.c2_control
& (1 << 31));
724 static int ats_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
727 target_ulong page_size
;
729 int ret
, is_user
= ri
->opc2
& 2;
730 int access_type
= ri
->opc2
& 1;
733 /* Other states are only available with TrustZone */
736 ret
= get_phys_addr(env
, value
, access_type
, is_user
,
737 &phys_addr
, &prot
, &page_size
);
738 if (extended_addresses_enabled(env
)) {
739 /* ret is a DFSR/IFSR value for the long descriptor
740 * translation table format, but with WnR always clear.
741 * Convert it to a 64-bit PAR.
743 uint64_t par64
= (1 << 11); /* LPAE bit always set */
745 par64
|= phys_addr
& ~0xfffULL
;
746 /* We don't set the ATTR or SH fields in the PAR. */
749 par64
|= (ret
& 0x3f) << 1; /* FS */
750 /* Note that S2WLK and FSTAGE are always zero, because we don't
751 * implement virtualization and therefore there can't be a stage 2
755 env
->cp15
.c7_par
= par64
;
756 env
->cp15
.c7_par_hi
= par64
>> 32;
758 /* ret is a DFSR/IFSR value for the short descriptor
759 * translation table format (with WnR always clear).
760 * Convert it to a 32-bit PAR.
763 /* We do not set any attribute bits in the PAR */
764 if (page_size
== (1 << 24)
765 && arm_feature(env
, ARM_FEATURE_V7
)) {
766 env
->cp15
.c7_par
= (phys_addr
& 0xff000000) | 1 << 1;
768 env
->cp15
.c7_par
= phys_addr
& 0xfffff000;
771 env
->cp15
.c7_par
= ((ret
& (10 << 1)) >> 5) |
772 ((ret
& (12 << 1)) >> 6) |
773 ((ret
& 0xf) << 1) | 1;
775 env
->cp15
.c7_par_hi
= 0;
781 static const ARMCPRegInfo vapa_cp_reginfo
[] = {
782 { .name
= "PAR", .cp
= 15, .crn
= 7, .crm
= 4, .opc1
= 0, .opc2
= 0,
783 .access
= PL1_RW
, .resetvalue
= 0,
784 .fieldoffset
= offsetof(CPUARMState
, cp15
.c7_par
),
785 .writefn
= par_write
},
786 #ifndef CONFIG_USER_ONLY
787 { .name
= "ATS", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= CP_ANY
,
788 .access
= PL1_W
, .writefn
= ats_write
, .type
= ARM_CP_NO_MIGRATE
},
793 /* Return basic MPU access permission bits. */
794 static uint32_t simple_mpu_ap_bits(uint32_t val
)
801 for (i
= 0; i
< 16; i
+= 2) {
802 ret
|= (val
>> i
) & mask
;
808 /* Pad basic MPU access permission bits to extended format. */
809 static uint32_t extended_mpu_ap_bits(uint32_t val
)
816 for (i
= 0; i
< 16; i
+= 2) {
817 ret
|= (val
& mask
) << i
;
823 static int pmsav5_data_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
826 env
->cp15
.c5_data
= extended_mpu_ap_bits(value
);
830 static int pmsav5_data_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
833 *value
= simple_mpu_ap_bits(env
->cp15
.c5_data
);
837 static int pmsav5_insn_ap_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
840 env
->cp15
.c5_insn
= extended_mpu_ap_bits(value
);
844 static int pmsav5_insn_ap_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
847 *value
= simple_mpu_ap_bits(env
->cp15
.c5_insn
);
851 static int arm946_prbs_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
857 *value
= env
->cp15
.c6_region
[ri
->crm
];
861 static int arm946_prbs_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
867 env
->cp15
.c6_region
[ri
->crm
] = value
;
871 static const ARMCPRegInfo pmsav5_cp_reginfo
[] = {
872 { .name
= "DATA_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
873 .access
= PL1_RW
, .type
= ARM_CP_NO_MIGRATE
,
874 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_data
), .resetvalue
= 0,
875 .readfn
= pmsav5_data_ap_read
, .writefn
= pmsav5_data_ap_write
, },
876 { .name
= "INSN_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
877 .access
= PL1_RW
, .type
= ARM_CP_NO_MIGRATE
,
878 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_insn
), .resetvalue
= 0,
879 .readfn
= pmsav5_insn_ap_read
, .writefn
= pmsav5_insn_ap_write
, },
880 { .name
= "DATA_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 2,
882 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_data
), .resetvalue
= 0, },
883 { .name
= "INSN_EXT_AP", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 3,
885 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_insn
), .resetvalue
= 0, },
886 { .name
= "DCACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
888 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_data
), .resetvalue
= 0, },
889 { .name
= "ICACHE_CFG", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 1,
891 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_insn
), .resetvalue
= 0, },
892 /* Protection region base and size registers */
893 { .name
= "946_PRBS", .cp
= 15, .crn
= 6, .crm
= CP_ANY
, .opc1
= 0,
894 .opc2
= CP_ANY
, .access
= PL1_RW
,
895 .readfn
= arm946_prbs_read
, .writefn
= arm946_prbs_write
, },
899 static int vmsa_ttbcr_raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
902 int maskshift
= extract32(value
, 0, 3);
904 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
905 value
&= ~((7 << 19) | (3 << 14) | (0xf << 3));
909 /* Note that we always calculate c2_mask and c2_base_mask, but
910 * they are only used for short-descriptor tables (ie if EAE is 0);
911 * for long-descriptor tables the TTBCR fields are used differently
912 * and the c2_mask and c2_base_mask values are meaningless.
914 env
->cp15
.c2_control
= value
;
915 env
->cp15
.c2_mask
= ~(((uint32_t)0xffffffffu
) >> maskshift
);
916 env
->cp15
.c2_base_mask
= ~((uint32_t)0x3fffu
>> maskshift
);
920 static int vmsa_ttbcr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
923 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
924 /* With LPAE the TTBCR could result in a change of ASID
925 * via the TTBCR.A1 bit, so do a TLB flush.
929 return vmsa_ttbcr_raw_write(env
, ri
, value
);
932 static void vmsa_ttbcr_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
934 env
->cp15
.c2_base_mask
= 0xffffc000u
;
935 env
->cp15
.c2_control
= 0;
936 env
->cp15
.c2_mask
= 0;
939 static const ARMCPRegInfo vmsa_cp_reginfo
[] = {
940 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 0,
942 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_data
), .resetvalue
= 0, },
943 { .name
= "IFSR", .cp
= 15, .crn
= 5, .crm
= 0, .opc1
= 0, .opc2
= 1,
945 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_insn
), .resetvalue
= 0, },
946 { .name
= "TTBR0", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 0,
948 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_base0
), .resetvalue
= 0, },
949 { .name
= "TTBR1", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 1,
951 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_base1
), .resetvalue
= 0, },
952 { .name
= "TTBCR", .cp
= 15, .crn
= 2, .crm
= 0, .opc1
= 0, .opc2
= 2,
953 .access
= PL1_RW
, .writefn
= vmsa_ttbcr_write
,
954 .resetfn
= vmsa_ttbcr_reset
, .raw_writefn
= vmsa_ttbcr_raw_write
,
955 .fieldoffset
= offsetof(CPUARMState
, cp15
.c2_control
) },
956 { .name
= "DFAR", .cp
= 15, .crn
= 6, .crm
= 0, .opc1
= 0, .opc2
= 0,
957 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c6_data
),
962 static int omap_ticonfig_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
965 env
->cp15
.c15_ticonfig
= value
& 0xe7;
966 /* The OS_TYPE bit in this register changes the reported CPUID! */
967 env
->cp15
.c0_cpuid
= (value
& (1 << 5)) ?
968 ARM_CPUID_TI915T
: ARM_CPUID_TI925T
;
972 static int omap_threadid_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
975 env
->cp15
.c15_threadid
= value
& 0xffff;
979 static int omap_wfi_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
982 /* Wait-for-interrupt (deprecated) */
983 cpu_interrupt(CPU(arm_env_get_cpu(env
)), CPU_INTERRUPT_HALT
);
987 static int omap_cachemaint_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
990 /* On OMAP there are registers indicating the max/min index of dcache lines
991 * containing a dirty line; cache flush operations have to reset these.
993 env
->cp15
.c15_i_max
= 0x000;
994 env
->cp15
.c15_i_min
= 0xff0;
998 static const ARMCPRegInfo omap_cp_reginfo
[] = {
999 { .name
= "DFSR", .cp
= 15, .crn
= 5, .crm
= CP_ANY
,
1000 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
, .type
= ARM_CP_OVERRIDE
,
1001 .fieldoffset
= offsetof(CPUARMState
, cp15
.c5_data
), .resetvalue
= 0, },
1002 { .name
= "", .cp
= 15, .crn
= 15, .crm
= 0, .opc1
= 0, .opc2
= 0,
1003 .access
= PL1_RW
, .type
= ARM_CP_NOP
},
1004 { .name
= "TICONFIG", .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0,
1006 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_ticonfig
), .resetvalue
= 0,
1007 .writefn
= omap_ticonfig_write
},
1008 { .name
= "IMAX", .cp
= 15, .crn
= 15, .crm
= 2, .opc1
= 0, .opc2
= 0,
1010 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_max
), .resetvalue
= 0, },
1011 { .name
= "IMIN", .cp
= 15, .crn
= 15, .crm
= 3, .opc1
= 0, .opc2
= 0,
1012 .access
= PL1_RW
, .resetvalue
= 0xff0,
1013 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_i_min
) },
1014 { .name
= "THREADID", .cp
= 15, .crn
= 15, .crm
= 4, .opc1
= 0, .opc2
= 0,
1016 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_threadid
), .resetvalue
= 0,
1017 .writefn
= omap_threadid_write
},
1018 { .name
= "TI925T_STATUS", .cp
= 15, .crn
= 15,
1019 .crm
= 8, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
1020 .type
= ARM_CP_NO_MIGRATE
,
1021 .readfn
= arm_cp_read_zero
, .writefn
= omap_wfi_write
, },
1022 /* TODO: Peripheral port remap register:
1023 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
1024 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
1027 { .name
= "OMAP_CACHEMAINT", .cp
= 15, .crn
= 7, .crm
= CP_ANY
,
1028 .opc1
= 0, .opc2
= CP_ANY
, .access
= PL1_W
,
1029 .type
= ARM_CP_OVERRIDE
| ARM_CP_NO_MIGRATE
,
1030 .writefn
= omap_cachemaint_write
},
1031 { .name
= "C9", .cp
= 15, .crn
= 9,
1032 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_RW
,
1033 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
, .resetvalue
= 0 },
1037 static int xscale_cpar_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1041 if (env
->cp15
.c15_cpar
!= value
) {
1042 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
1044 env
->cp15
.c15_cpar
= value
;
1049 static const ARMCPRegInfo xscale_cp_reginfo
[] = {
1050 { .name
= "XSCALE_CPAR",
1051 .cp
= 15, .crn
= 15, .crm
= 1, .opc1
= 0, .opc2
= 0, .access
= PL1_RW
,
1052 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_cpar
), .resetvalue
= 0,
1053 .writefn
= xscale_cpar_write
, },
1054 { .name
= "XSCALE_AUXCR",
1055 .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 1, .access
= PL1_RW
,
1056 .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_xscaleauxcr
),
1061 static const ARMCPRegInfo dummy_c15_cp_reginfo
[] = {
1062 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
1063 * implementation of this implementation-defined space.
1064 * Ideally this should eventually disappear in favour of actually
1065 * implementing the correct behaviour for all cores.
1067 { .name
= "C15_IMPDEF", .cp
= 15, .crn
= 15,
1068 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
1069 .access
= PL1_RW
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
1074 static const ARMCPRegInfo cache_dirty_status_cp_reginfo
[] = {
1075 /* Cache status: RAZ because we have no cache so it's always clean */
1076 { .name
= "CDSR", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 6,
1077 .access
= PL1_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
1082 static const ARMCPRegInfo cache_block_ops_cp_reginfo
[] = {
1083 /* We never have a a block transfer operation in progress */
1084 { .name
= "BXSR", .cp
= 15, .crn
= 7, .crm
= 12, .opc1
= 0, .opc2
= 4,
1085 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
1087 /* The cache ops themselves: these all NOP for QEMU */
1088 { .name
= "IICR", .cp
= 15, .crm
= 5, .opc1
= 0,
1089 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
1090 { .name
= "IDCR", .cp
= 15, .crm
= 6, .opc1
= 0,
1091 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
1092 { .name
= "CDCR", .cp
= 15, .crm
= 12, .opc1
= 0,
1093 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
1094 { .name
= "PIR", .cp
= 15, .crm
= 12, .opc1
= 1,
1095 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
1096 { .name
= "PDR", .cp
= 15, .crm
= 12, .opc1
= 2,
1097 .access
= PL0_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
1098 { .name
= "CIDCR", .cp
= 15, .crm
= 14, .opc1
= 0,
1099 .access
= PL1_W
, .type
= ARM_CP_NOP
|ARM_CP_64BIT
},
1103 static const ARMCPRegInfo cache_test_clean_cp_reginfo
[] = {
1104 /* The cache test-and-clean instructions always return (1 << 30)
1105 * to indicate that there are no dirty cache lines.
1107 { .name
= "TC_DCACHE", .cp
= 15, .crn
= 7, .crm
= 10, .opc1
= 0, .opc2
= 3,
1108 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
1109 .resetvalue
= (1 << 30) },
1110 { .name
= "TCI_DCACHE", .cp
= 15, .crn
= 7, .crm
= 14, .opc1
= 0, .opc2
= 3,
1111 .access
= PL0_R
, .type
= ARM_CP_CONST
| ARM_CP_NO_MIGRATE
,
1112 .resetvalue
= (1 << 30) },
1116 static const ARMCPRegInfo strongarm_cp_reginfo
[] = {
1117 /* Ignore ReadBuffer accesses */
1118 { .name
= "C9_READBUFFER", .cp
= 15, .crn
= 9,
1119 .crm
= CP_ANY
, .opc1
= CP_ANY
, .opc2
= CP_ANY
,
1120 .access
= PL1_RW
, .resetvalue
= 0,
1121 .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
| ARM_CP_NO_MIGRATE
},
1125 static int mpidr_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1128 CPUState
*cs
= CPU(arm_env_get_cpu(env
));
1129 uint32_t mpidr
= cs
->cpu_index
;
1130 /* We don't support setting cluster ID ([8..11])
1131 * so these bits always RAZ.
1133 if (arm_feature(env
, ARM_FEATURE_V7MP
)) {
1135 /* Cores which are uniprocessor (non-coherent)
1136 * but still implement the MP extensions set
1137 * bit 30. (For instance, A9UP.) However we do
1138 * not currently model any of those cores.
1145 static const ARMCPRegInfo mpidr_cp_reginfo
[] = {
1146 { .name
= "MPIDR", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 5,
1147 .access
= PL1_R
, .readfn
= mpidr_read
, .type
= ARM_CP_NO_MIGRATE
},
1151 static int par64_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t *value
)
1153 *value
= ((uint64_t)env
->cp15
.c7_par_hi
<< 32) | env
->cp15
.c7_par
;
1157 static int par64_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
1159 env
->cp15
.c7_par_hi
= value
>> 32;
1160 env
->cp15
.c7_par
= value
;
1164 static void par64_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1166 env
->cp15
.c7_par_hi
= 0;
1167 env
->cp15
.c7_par
= 0;
1170 static int ttbr064_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1173 *value
= ((uint64_t)env
->cp15
.c2_base0_hi
<< 32) | env
->cp15
.c2_base0
;
1177 static int ttbr064_raw_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1180 env
->cp15
.c2_base0_hi
= value
>> 32;
1181 env
->cp15
.c2_base0
= value
;
1185 static int ttbr064_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1188 /* Writes to the 64 bit format TTBRs may change the ASID */
1190 return ttbr064_raw_write(env
, ri
, value
);
1193 static void ttbr064_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1195 env
->cp15
.c2_base0_hi
= 0;
1196 env
->cp15
.c2_base0
= 0;
1199 static int ttbr164_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1202 *value
= ((uint64_t)env
->cp15
.c2_base1_hi
<< 32) | env
->cp15
.c2_base1
;
1206 static int ttbr164_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1209 env
->cp15
.c2_base1_hi
= value
>> 32;
1210 env
->cp15
.c2_base1
= value
;
1214 static void ttbr164_reset(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1216 env
->cp15
.c2_base1_hi
= 0;
1217 env
->cp15
.c2_base1
= 0;
1220 static const ARMCPRegInfo lpae_cp_reginfo
[] = {
1221 /* NOP AMAIR0/1: the override is because these clash with the rather
1222 * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
1224 { .name
= "AMAIR0", .cp
= 15, .crn
= 10, .crm
= 3, .opc1
= 0, .opc2
= 0,
1225 .access
= PL1_RW
, .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
,
1227 { .name
= "AMAIR1", .cp
= 15, .crn
= 10, .crm
= 3, .opc1
= 0, .opc2
= 1,
1228 .access
= PL1_RW
, .type
= ARM_CP_CONST
| ARM_CP_OVERRIDE
,
1230 /* 64 bit access versions of the (dummy) debug registers */
1231 { .name
= "DBGDRAR", .cp
= 14, .crm
= 1, .opc1
= 0,
1232 .access
= PL0_R
, .type
= ARM_CP_CONST
|ARM_CP_64BIT
, .resetvalue
= 0 },
1233 { .name
= "DBGDSAR", .cp
= 14, .crm
= 2, .opc1
= 0,
1234 .access
= PL0_R
, .type
= ARM_CP_CONST
|ARM_CP_64BIT
, .resetvalue
= 0 },
1235 { .name
= "PAR", .cp
= 15, .crm
= 7, .opc1
= 0,
1236 .access
= PL1_RW
, .type
= ARM_CP_64BIT
,
1237 .readfn
= par64_read
, .writefn
= par64_write
, .resetfn
= par64_reset
},
1238 { .name
= "TTBR0", .cp
= 15, .crm
= 2, .opc1
= 0,
1239 .access
= PL1_RW
, .type
= ARM_CP_64BIT
, .readfn
= ttbr064_read
,
1240 .writefn
= ttbr064_write
, .raw_writefn
= ttbr064_raw_write
,
1241 .resetfn
= ttbr064_reset
},
1242 { .name
= "TTBR1", .cp
= 15, .crm
= 2, .opc1
= 1,
1243 .access
= PL1_RW
, .type
= ARM_CP_64BIT
, .readfn
= ttbr164_read
,
1244 .writefn
= ttbr164_write
, .resetfn
= ttbr164_reset
},
1248 static int sctlr_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t value
)
1250 env
->cp15
.c1_sys
= value
;
1251 /* ??? Lots of these bits are not implemented. */
1252 /* This may enable/disable the MMU, so do a TLB flush. */
1257 void register_cp_regs_for_features(ARMCPU
*cpu
)
1259 /* Register all the coprocessor registers based on feature bits */
1260 CPUARMState
*env
= &cpu
->env
;
1261 if (arm_feature(env
, ARM_FEATURE_M
)) {
1262 /* M profile has no coprocessor registers */
1266 define_arm_cp_regs(cpu
, cp_reginfo
);
1267 if (arm_feature(env
, ARM_FEATURE_V6
)) {
1268 /* The ID registers all have impdef reset values */
1269 ARMCPRegInfo v6_idregs
[] = {
1270 { .name
= "ID_PFR0", .cp
= 15, .crn
= 0, .crm
= 1,
1271 .opc1
= 0, .opc2
= 0, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1272 .resetvalue
= cpu
->id_pfr0
},
1273 { .name
= "ID_PFR1", .cp
= 15, .crn
= 0, .crm
= 1,
1274 .opc1
= 0, .opc2
= 1, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1275 .resetvalue
= cpu
->id_pfr1
},
1276 { .name
= "ID_DFR0", .cp
= 15, .crn
= 0, .crm
= 1,
1277 .opc1
= 0, .opc2
= 2, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1278 .resetvalue
= cpu
->id_dfr0
},
1279 { .name
= "ID_AFR0", .cp
= 15, .crn
= 0, .crm
= 1,
1280 .opc1
= 0, .opc2
= 3, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1281 .resetvalue
= cpu
->id_afr0
},
1282 { .name
= "ID_MMFR0", .cp
= 15, .crn
= 0, .crm
= 1,
1283 .opc1
= 0, .opc2
= 4, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1284 .resetvalue
= cpu
->id_mmfr0
},
1285 { .name
= "ID_MMFR1", .cp
= 15, .crn
= 0, .crm
= 1,
1286 .opc1
= 0, .opc2
= 5, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1287 .resetvalue
= cpu
->id_mmfr1
},
1288 { .name
= "ID_MMFR2", .cp
= 15, .crn
= 0, .crm
= 1,
1289 .opc1
= 0, .opc2
= 6, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1290 .resetvalue
= cpu
->id_mmfr2
},
1291 { .name
= "ID_MMFR3", .cp
= 15, .crn
= 0, .crm
= 1,
1292 .opc1
= 0, .opc2
= 7, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1293 .resetvalue
= cpu
->id_mmfr3
},
1294 { .name
= "ID_ISAR0", .cp
= 15, .crn
= 0, .crm
= 2,
1295 .opc1
= 0, .opc2
= 0, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1296 .resetvalue
= cpu
->id_isar0
},
1297 { .name
= "ID_ISAR1", .cp
= 15, .crn
= 0, .crm
= 2,
1298 .opc1
= 0, .opc2
= 1, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1299 .resetvalue
= cpu
->id_isar1
},
1300 { .name
= "ID_ISAR2", .cp
= 15, .crn
= 0, .crm
= 2,
1301 .opc1
= 0, .opc2
= 2, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1302 .resetvalue
= cpu
->id_isar2
},
1303 { .name
= "ID_ISAR3", .cp
= 15, .crn
= 0, .crm
= 2,
1304 .opc1
= 0, .opc2
= 3, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1305 .resetvalue
= cpu
->id_isar3
},
1306 { .name
= "ID_ISAR4", .cp
= 15, .crn
= 0, .crm
= 2,
1307 .opc1
= 0, .opc2
= 4, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1308 .resetvalue
= cpu
->id_isar4
},
1309 { .name
= "ID_ISAR5", .cp
= 15, .crn
= 0, .crm
= 2,
1310 .opc1
= 0, .opc2
= 5, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1311 .resetvalue
= cpu
->id_isar5
},
1312 /* 6..7 are as yet unallocated and must RAZ */
1313 { .name
= "ID_ISAR6", .cp
= 15, .crn
= 0, .crm
= 2,
1314 .opc1
= 0, .opc2
= 6, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1316 { .name
= "ID_ISAR7", .cp
= 15, .crn
= 0, .crm
= 2,
1317 .opc1
= 0, .opc2
= 7, .access
= PL1_R
, .type
= ARM_CP_CONST
,
1321 define_arm_cp_regs(cpu
, v6_idregs
);
1322 define_arm_cp_regs(cpu
, v6_cp_reginfo
);
1324 define_arm_cp_regs(cpu
, not_v6_cp_reginfo
);
1326 if (arm_feature(env
, ARM_FEATURE_V6K
)) {
1327 define_arm_cp_regs(cpu
, v6k_cp_reginfo
);
1329 if (arm_feature(env
, ARM_FEATURE_V7
)) {
1330 /* v7 performance monitor control register: same implementor
1331 * field as main ID register, and we implement no event counters.
1333 ARMCPRegInfo pmcr
= {
1334 .name
= "PMCR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 0,
1335 .access
= PL0_RW
, .resetvalue
= cpu
->midr
& 0xff000000,
1336 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcr
),
1337 .readfn
= pmreg_read
, .writefn
= pmcr_write
,
1338 .raw_readfn
= raw_read
, .raw_writefn
= raw_write
,
1340 ARMCPRegInfo clidr
= {
1341 .name
= "CLIDR", .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 1, .opc2
= 1,
1342 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= cpu
->clidr
1344 define_one_arm_cp_reg(cpu
, &pmcr
);
1345 define_one_arm_cp_reg(cpu
, &clidr
);
1346 define_arm_cp_regs(cpu
, v7_cp_reginfo
);
1348 define_arm_cp_regs(cpu
, not_v7_cp_reginfo
);
1350 if (arm_feature(env
, ARM_FEATURE_MPU
)) {
1351 /* These are the MPU registers prior to PMSAv6. Any new
1352 * PMSA core later than the ARM946 will require that we
1353 * implement the PMSAv6 or PMSAv7 registers, which are
1354 * completely different.
1356 assert(!arm_feature(env
, ARM_FEATURE_V6
));
1357 define_arm_cp_regs(cpu
, pmsav5_cp_reginfo
);
1359 define_arm_cp_regs(cpu
, vmsa_cp_reginfo
);
1361 if (arm_feature(env
, ARM_FEATURE_THUMB2EE
)) {
1362 define_arm_cp_regs(cpu
, t2ee_cp_reginfo
);
1364 if (arm_feature(env
, ARM_FEATURE_GENERIC_TIMER
)) {
1365 define_arm_cp_regs(cpu
, generic_timer_cp_reginfo
);
1367 if (arm_feature(env
, ARM_FEATURE_VAPA
)) {
1368 define_arm_cp_regs(cpu
, vapa_cp_reginfo
);
1370 if (arm_feature(env
, ARM_FEATURE_CACHE_TEST_CLEAN
)) {
1371 define_arm_cp_regs(cpu
, cache_test_clean_cp_reginfo
);
1373 if (arm_feature(env
, ARM_FEATURE_CACHE_DIRTY_REG
)) {
1374 define_arm_cp_regs(cpu
, cache_dirty_status_cp_reginfo
);
1376 if (arm_feature(env
, ARM_FEATURE_CACHE_BLOCK_OPS
)) {
1377 define_arm_cp_regs(cpu
, cache_block_ops_cp_reginfo
);
1379 if (arm_feature(env
, ARM_FEATURE_OMAPCP
)) {
1380 define_arm_cp_regs(cpu
, omap_cp_reginfo
);
1382 if (arm_feature(env
, ARM_FEATURE_STRONGARM
)) {
1383 define_arm_cp_regs(cpu
, strongarm_cp_reginfo
);
1385 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
1386 define_arm_cp_regs(cpu
, xscale_cp_reginfo
);
1388 if (arm_feature(env
, ARM_FEATURE_DUMMY_C15_REGS
)) {
1389 define_arm_cp_regs(cpu
, dummy_c15_cp_reginfo
);
1391 if (arm_feature(env
, ARM_FEATURE_LPAE
)) {
1392 define_arm_cp_regs(cpu
, lpae_cp_reginfo
);
1394 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
1395 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
1396 * be read-only (ie write causes UNDEF exception).
1399 ARMCPRegInfo id_cp_reginfo
[] = {
1400 /* Note that the MIDR isn't a simple constant register because
1401 * of the TI925 behaviour where writes to another register can
1402 * cause the MIDR value to change.
1404 * Unimplemented registers in the c15 0 0 0 space default to
1405 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
1406 * and friends override accordingly.
1409 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= CP_ANY
,
1410 .access
= PL1_R
, .resetvalue
= cpu
->midr
,
1411 .writefn
= arm_cp_write_ignore
, .raw_writefn
= raw_write
,
1412 .fieldoffset
= offsetof(CPUARMState
, cp15
.c0_cpuid
),
1413 .type
= ARM_CP_OVERRIDE
},
1415 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 1,
1416 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= cpu
->ctr
},
1418 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 2,
1419 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1421 .cp
= 15, .crn
= 0, .crm
= 0, .opc1
= 0, .opc2
= 3,
1422 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1423 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
1425 .cp
= 15, .crn
= 0, .crm
= 3, .opc1
= 0, .opc2
= CP_ANY
,
1426 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1428 .cp
= 15, .crn
= 0, .crm
= 4, .opc1
= 0, .opc2
= CP_ANY
,
1429 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1431 .cp
= 15, .crn
= 0, .crm
= 5, .opc1
= 0, .opc2
= CP_ANY
,
1432 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1434 .cp
= 15, .crn
= 0, .crm
= 6, .opc1
= 0, .opc2
= CP_ANY
,
1435 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1437 .cp
= 15, .crn
= 0, .crm
= 7, .opc1
= 0, .opc2
= CP_ANY
,
1438 .access
= PL1_R
, .type
= ARM_CP_CONST
, .resetvalue
= 0 },
1441 ARMCPRegInfo crn0_wi_reginfo
= {
1442 .name
= "CRN0_WI", .cp
= 15, .crn
= 0, .crm
= CP_ANY
,
1443 .opc1
= CP_ANY
, .opc2
= CP_ANY
, .access
= PL1_W
,
1444 .type
= ARM_CP_NOP
| ARM_CP_OVERRIDE
1446 if (arm_feature(env
, ARM_FEATURE_OMAPCP
) ||
1447 arm_feature(env
, ARM_FEATURE_STRONGARM
)) {
1449 /* Register the blanket "writes ignored" value first to cover the
1450 * whole space. Then update the specific ID registers to allow write
1451 * access, so that they ignore writes rather than causing them to
1454 define_one_arm_cp_reg(cpu
, &crn0_wi_reginfo
);
1455 for (r
= id_cp_reginfo
; r
->type
!= ARM_CP_SENTINEL
; r
++) {
1459 define_arm_cp_regs(cpu
, id_cp_reginfo
);
1462 if (arm_feature(env
, ARM_FEATURE_MPIDR
)) {
1463 define_arm_cp_regs(cpu
, mpidr_cp_reginfo
);
1466 if (arm_feature(env
, ARM_FEATURE_AUXCR
)) {
1467 ARMCPRegInfo auxcr
= {
1468 .name
= "AUXCR", .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 1,
1469 .access
= PL1_RW
, .type
= ARM_CP_CONST
,
1470 .resetvalue
= cpu
->reset_auxcr
1472 define_one_arm_cp_reg(cpu
, &auxcr
);
1475 /* Generic registers whose values depend on the implementation */
1477 ARMCPRegInfo sctlr
= {
1478 .name
= "SCTLR", .cp
= 15, .crn
= 1, .crm
= 0, .opc1
= 0, .opc2
= 0,
1479 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, cp15
.c1_sys
),
1480 .writefn
= sctlr_write
, .resetvalue
= cpu
->reset_sctlr
,
1481 .raw_writefn
= raw_write
,
1483 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
1484 /* Normally we would always end the TB on an SCTLR write, but Linux
1485 * arch/arm/mach-pxa/sleep.S expects two instructions following
1486 * an MMU enable to execute from cache. Imitate this behaviour.
1488 sctlr
.type
|= ARM_CP_SUPPRESS_TB_END
;
1490 define_one_arm_cp_reg(cpu
, &sctlr
);
1494 ARMCPU
*cpu_arm_init(const char *cpu_model
)
1500 oc
= cpu_class_by_name(TYPE_ARM_CPU
, cpu_model
);
1504 cpu
= ARM_CPU(object_new(object_class_get_name(oc
)));
1506 env
->cpu_model_str
= cpu_model
;
1508 /* TODO this should be set centrally, once possible */
1509 object_property_set_bool(OBJECT(cpu
), true, "realized", NULL
);
1514 void arm_cpu_register_gdb_regs_for_features(ARMCPU
*cpu
)
1516 CPUARMState
*env
= &cpu
->env
;
1518 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
1519 gdb_register_coprocessor(env
, vfp_gdb_get_reg
, vfp_gdb_set_reg
,
1520 51, "arm-neon.xml", 0);
1521 } else if (arm_feature(env
, ARM_FEATURE_VFP3
)) {
1522 gdb_register_coprocessor(env
, vfp_gdb_get_reg
, vfp_gdb_set_reg
,
1523 35, "arm-vfp3.xml", 0);
1524 } else if (arm_feature(env
, ARM_FEATURE_VFP
)) {
1525 gdb_register_coprocessor(env
, vfp_gdb_get_reg
, vfp_gdb_set_reg
,
1526 19, "arm-vfp.xml", 0);
1530 /* Sort alphabetically by type name, except for "any". */
1531 static gint
arm_cpu_list_compare(gconstpointer a
, gconstpointer b
)
1533 ObjectClass
*class_a
= (ObjectClass
*)a
;
1534 ObjectClass
*class_b
= (ObjectClass
*)b
;
1535 const char *name_a
, *name_b
;
1537 name_a
= object_class_get_name(class_a
);
1538 name_b
= object_class_get_name(class_b
);
1539 if (strcmp(name_a
, "any-" TYPE_ARM_CPU
) == 0) {
1541 } else if (strcmp(name_b
, "any-" TYPE_ARM_CPU
) == 0) {
1544 return strcmp(name_a
, name_b
);
1548 static void arm_cpu_list_entry(gpointer data
, gpointer user_data
)
1550 ObjectClass
*oc
= data
;
1551 CPUListState
*s
= user_data
;
1552 const char *typename
;
1555 typename
= object_class_get_name(oc
);
1556 name
= g_strndup(typename
, strlen(typename
) - strlen("-" TYPE_ARM_CPU
));
1557 (*s
->cpu_fprintf
)(s
->file
, " %s\n",
1562 void arm_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
1566 .cpu_fprintf
= cpu_fprintf
,
1570 list
= object_class_get_list(TYPE_ARM_CPU
, false);
1571 list
= g_slist_sort(list
, arm_cpu_list_compare
);
1572 (*cpu_fprintf
)(f
, "Available CPUs:\n");
1573 g_slist_foreach(list
, arm_cpu_list_entry
, &s
);
1577 void define_one_arm_cp_reg_with_opaque(ARMCPU
*cpu
,
1578 const ARMCPRegInfo
*r
, void *opaque
)
1580 /* Define implementations of coprocessor registers.
1581 * We store these in a hashtable because typically
1582 * there are less than 150 registers in a space which
1583 * is 16*16*16*8*8 = 262144 in size.
1584 * Wildcarding is supported for the crm, opc1 and opc2 fields.
1585 * If a register is defined twice then the second definition is
1586 * used, so this can be used to define some generic registers and
1587 * then override them with implementation specific variations.
1588 * At least one of the original and the second definition should
1589 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
1590 * against accidental use.
1592 int crm
, opc1
, opc2
;
1593 int crmmin
= (r
->crm
== CP_ANY
) ? 0 : r
->crm
;
1594 int crmmax
= (r
->crm
== CP_ANY
) ? 15 : r
->crm
;
1595 int opc1min
= (r
->opc1
== CP_ANY
) ? 0 : r
->opc1
;
1596 int opc1max
= (r
->opc1
== CP_ANY
) ? 7 : r
->opc1
;
1597 int opc2min
= (r
->opc2
== CP_ANY
) ? 0 : r
->opc2
;
1598 int opc2max
= (r
->opc2
== CP_ANY
) ? 7 : r
->opc2
;
1599 /* 64 bit registers have only CRm and Opc1 fields */
1600 assert(!((r
->type
& ARM_CP_64BIT
) && (r
->opc2
|| r
->crn
)));
1601 /* Check that the register definition has enough info to handle
1602 * reads and writes if they are permitted.
1604 if (!(r
->type
& (ARM_CP_SPECIAL
|ARM_CP_CONST
))) {
1605 if (r
->access
& PL3_R
) {
1606 assert(r
->fieldoffset
|| r
->readfn
);
1608 if (r
->access
& PL3_W
) {
1609 assert(r
->fieldoffset
|| r
->writefn
);
1612 /* Bad type field probably means missing sentinel at end of reg list */
1613 assert(cptype_valid(r
->type
));
1614 for (crm
= crmmin
; crm
<= crmmax
; crm
++) {
1615 for (opc1
= opc1min
; opc1
<= opc1max
; opc1
++) {
1616 for (opc2
= opc2min
; opc2
<= opc2max
; opc2
++) {
1617 uint32_t *key
= g_new(uint32_t, 1);
1618 ARMCPRegInfo
*r2
= g_memdup(r
, sizeof(ARMCPRegInfo
));
1619 int is64
= (r
->type
& ARM_CP_64BIT
) ? 1 : 0;
1620 *key
= ENCODE_CP_REG(r
->cp
, is64
, r
->crn
, crm
, opc1
, opc2
);
1622 r2
->opaque
= opaque
;
1624 /* Make sure reginfo passed to helpers for wildcarded regs
1625 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
1630 /* By convention, for wildcarded registers only the first
1631 * entry is used for migration; the others are marked as
1632 * NO_MIGRATE so we don't try to transfer the register
1633 * multiple times. Special registers (ie NOP/WFI) are
1636 if ((r
->type
& ARM_CP_SPECIAL
) ||
1637 ((r
->crm
== CP_ANY
) && crm
!= 0) ||
1638 ((r
->opc1
== CP_ANY
) && opc1
!= 0) ||
1639 ((r
->opc2
== CP_ANY
) && opc2
!= 0)) {
1640 r2
->type
|= ARM_CP_NO_MIGRATE
;
1643 /* Overriding of an existing definition must be explicitly
1646 if (!(r
->type
& ARM_CP_OVERRIDE
)) {
1647 ARMCPRegInfo
*oldreg
;
1648 oldreg
= g_hash_table_lookup(cpu
->cp_regs
, key
);
1649 if (oldreg
&& !(oldreg
->type
& ARM_CP_OVERRIDE
)) {
1650 fprintf(stderr
, "Register redefined: cp=%d %d bit "
1651 "crn=%d crm=%d opc1=%d opc2=%d, "
1652 "was %s, now %s\n", r2
->cp
, 32 + 32 * is64
,
1653 r2
->crn
, r2
->crm
, r2
->opc1
, r2
->opc2
,
1654 oldreg
->name
, r2
->name
);
1658 g_hash_table_insert(cpu
->cp_regs
, key
, r2
);
1664 void define_arm_cp_regs_with_opaque(ARMCPU
*cpu
,
1665 const ARMCPRegInfo
*regs
, void *opaque
)
1667 /* Define a whole list of registers */
1668 const ARMCPRegInfo
*r
;
1669 for (r
= regs
; r
->type
!= ARM_CP_SENTINEL
; r
++) {
1670 define_one_arm_cp_reg_with_opaque(cpu
, r
, opaque
);
1674 const ARMCPRegInfo
*get_arm_cp_reginfo(ARMCPU
*cpu
, uint32_t encoded_cp
)
1676 return g_hash_table_lookup(cpu
->cp_regs
, &encoded_cp
);
1679 int arm_cp_write_ignore(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1682 /* Helper coprocessor write function for write-ignore registers */
1686 int arm_cp_read_zero(CPUARMState
*env
, const ARMCPRegInfo
*ri
, uint64_t *value
)
1688 /* Helper coprocessor write function for read-as-zero registers */
1693 static int bad_mode_switch(CPUARMState
*env
, int mode
)
1695 /* Return true if it is not valid for us to switch to
1696 * this CPU mode (ie all the UNPREDICTABLE cases in
1697 * the ARM ARM CPSRWriteByInstr pseudocode).
1700 case ARM_CPU_MODE_USR
:
1701 case ARM_CPU_MODE_SYS
:
1702 case ARM_CPU_MODE_SVC
:
1703 case ARM_CPU_MODE_ABT
:
1704 case ARM_CPU_MODE_UND
:
1705 case ARM_CPU_MODE_IRQ
:
1706 case ARM_CPU_MODE_FIQ
:
1713 uint32_t cpsr_read(CPUARMState
*env
)
1716 ZF
= (env
->ZF
== 0);
1717 return env
->uncached_cpsr
| (env
->NF
& 0x80000000) | (ZF
<< 30) |
1718 (env
->CF
<< 29) | ((env
->VF
& 0x80000000) >> 3) | (env
->QF
<< 27)
1719 | (env
->thumb
<< 5) | ((env
->condexec_bits
& 3) << 25)
1720 | ((env
->condexec_bits
& 0xfc) << 8)
1724 void cpsr_write(CPUARMState
*env
, uint32_t val
, uint32_t mask
)
1726 if (mask
& CPSR_NZCV
) {
1727 env
->ZF
= (~val
) & CPSR_Z
;
1729 env
->CF
= (val
>> 29) & 1;
1730 env
->VF
= (val
<< 3) & 0x80000000;
1733 env
->QF
= ((val
& CPSR_Q
) != 0);
1735 env
->thumb
= ((val
& CPSR_T
) != 0);
1736 if (mask
& CPSR_IT_0_1
) {
1737 env
->condexec_bits
&= ~3;
1738 env
->condexec_bits
|= (val
>> 25) & 3;
1740 if (mask
& CPSR_IT_2_7
) {
1741 env
->condexec_bits
&= 3;
1742 env
->condexec_bits
|= (val
>> 8) & 0xfc;
1744 if (mask
& CPSR_GE
) {
1745 env
->GE
= (val
>> 16) & 0xf;
1748 if ((env
->uncached_cpsr
^ val
) & mask
& CPSR_M
) {
1749 if (bad_mode_switch(env
, val
& CPSR_M
)) {
1750 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
1751 * We choose to ignore the attempt and leave the CPSR M field
1756 switch_mode(env
, val
& CPSR_M
);
1759 mask
&= ~CACHED_CPSR_BITS
;
1760 env
->uncached_cpsr
= (env
->uncached_cpsr
& ~mask
) | (val
& mask
);
1763 /* Sign/zero extend */
1764 uint32_t HELPER(sxtb16
)(uint32_t x
)
1767 res
= (uint16_t)(int8_t)x
;
1768 res
|= (uint32_t)(int8_t)(x
>> 16) << 16;
1772 uint32_t HELPER(uxtb16
)(uint32_t x
)
1775 res
= (uint16_t)(uint8_t)x
;
1776 res
|= (uint32_t)(uint8_t)(x
>> 16) << 16;
1780 uint32_t HELPER(clz
)(uint32_t x
)
1785 int32_t HELPER(sdiv
)(int32_t num
, int32_t den
)
1789 if (num
== INT_MIN
&& den
== -1)
1794 uint32_t HELPER(udiv
)(uint32_t num
, uint32_t den
)
1801 uint32_t HELPER(rbit
)(uint32_t x
)
1803 x
= ((x
& 0xff000000) >> 24)
1804 | ((x
& 0x00ff0000) >> 8)
1805 | ((x
& 0x0000ff00) << 8)
1806 | ((x
& 0x000000ff) << 24);
1807 x
= ((x
& 0xf0f0f0f0) >> 4)
1808 | ((x
& 0x0f0f0f0f) << 4);
1809 x
= ((x
& 0x88888888) >> 3)
1810 | ((x
& 0x44444444) >> 1)
1811 | ((x
& 0x22222222) << 1)
1812 | ((x
& 0x11111111) << 3);
#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else
/* Map CPU modes onto saved register banks.  */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}
/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
/* Handle a CPU exception.  */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}
static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault.  */
        } else {
            code = 11; /* Page domain fault.  */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;
static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    address &= (0xffffffffU >> tsz);

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
hwaddr cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0)
        return -1;

    return phys_addr;
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
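/* For reference (editorial note): the target bit positions used above are
 * the FPSCR cumulative exception flags IOC (bit 0), DZC (bit 1), OFC
 * (bit 2), UFC (bit 3), IXC (bit 4) and IDC (bit 7).
 */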
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
} \
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
                                      void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
}

VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);

    return make_float32(val32);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        int64_t q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */

    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;*/
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate (f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}