2 #include "exec/gdbstub.h"
4 #include "qemu/host-utils.h"
5 #include "sysemu/sysemu.h"
6 #include "qemu/bitops.h"
8 #ifndef CONFIG_USER_ONLY
9 static inline int get_phys_addr(CPUARMState
*env
, uint32_t address
,
10 int access_type
, int is_user
,
11 hwaddr
*phys_ptr
, int *prot
,
12 target_ulong
*page_size
);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int raw_read(CPUARMState *env, const ARMCPRegInfo *ri,
                    uint64_t *value)
{
    *value = CPREG_FIELD32(env, ri);
    return 0;
}

static int raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                     uint64_t value)
{
    CPREG_FIELD32(env, ri) = value;
    return 0;
}

static bool read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t *v)
{
    /* Raw read of a coprocessor register (as needed for migration, etc)
     * return true on success, false if the read is impossible for some reason.
     */
    if (ri->type & ARM_CP_CONST) {
        *v = ri->resetvalue;
    } else if (ri->raw_readfn) {
        return (ri->raw_readfn(env, ri, v) == 0);
    } else if (ri->readfn) {
        return (ri->readfn(env, ri, v) == 0);
    } else {
        if (ri->type & ARM_CP_64BIT) {
            *v = CPREG_FIELD64(env, ri);
        } else {
            *v = CPREG_FIELD32(env, ri);
        }
    }
    return true;
}

static bool write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Return true on success, false if the write is impossible for some reason.
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return true;
    } else if (ri->raw_writefn) {
        return (ri->raw_writefn(env, ri, v) == 0);
    } else if (ri->writefn) {
        return (ri->writefn(env, ri, v) == 0);
    } else {
        if (ri->type & ARM_CP_64BIT) {
            CPREG_FIELD64(env, ri) = v;
        } else {
            CPREG_FIELD32(env, ri) = v;
        }
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu, regidx);

        if (ri->type & ARM_CP_NO_MIGRATE) {

        if (!read_raw_cp_reg(&cpu->env, ri, &v)) {

        cpu->cpreg_values[i] = v;

bool write_list_to_cpustate(ARMCPU *cpu)
    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];

        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu, regidx);

        if (ri->type & ARM_CP_NO_MIGRATE) {

        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        if (!write_raw_cp_reg(&cpu->env, ri, v) ||
            !read_raw_cp_reg(&cpu->env, ri, &readback) ||
static void add_cpreg_to_list(gpointer key, gpointer opaque)
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;

static void count_cpreg(gpointer key, gpointer opaque)
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
    uint32_t aidx = *(uint32_t *)a;
    uint32_t bidx = *(uint32_t *)b;
void init_cpreg_list(ARMCPU *cpu)
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);
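/* Usage sketch (illustrative, not part of the original listing): the
 * cpreg_indexes[]/cpreg_values[] arrays built by init_cpreg_list() are what
 * the migration/KVM sync code works against, pairing the two helpers above:
 *
 *     if (!write_cpustate_to_list(cpu)) {
 *         // some register could not be read back raw
 *     }
 *     // ... transfer cpu->cpreg_values[] ...
 *     if (!write_list_to_cpustate(cpu)) {
 *         // an incoming value failed its write/read-back check
 *     }
 *
 * This assumes a valid ARMCPU *cpu whose cp_regs hash is already populated.
 */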
static int dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */

static int fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        env->cp15.c13_fcse = value;

static int contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
    env->cp15.c13_context = value;

static int tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
    /* Invalidate all (TLBIALL) */

static int tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);

static int tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);

static int tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
static int cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB?  */

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
static int pmreg_read(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t *value)
    /* Generic performance monitor register read function for where
     * user access may be allowed by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    *value = CPREG_FIELD32(env, ri);

static int pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

static int pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmcnten |= value;

static int pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmcnten &= ~value;

static int pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmovsr &= ~value;

static int pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
    env->cp15.c9_pmxevtyper = value & 0xff;

static int pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
    env->cp15.c9_pmuserenr = value & 1;

static int pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    /* We have no event counters so only the C bit can be changed */
    env->cp15.c9_pminten |= value;

static int pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    env->cp15.c9_pminten &= ~value;

static int ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t *value)
    ARMCPU *cpu = arm_env_get_cpu(env);
    *value = cpu->ccsidr[env->cp15.c0_cssel];

static int csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
    env->cp15.c0_cssel = value & 0xf;
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .readfn = pmreg_read, .writefn = pmcntenset_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .readfn = pmreg_read, .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .readfn = pmreg_read, .writefn = pmovsr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    /* Unimplemented so WI. Strictly speaking write accesses in PL0 should
     */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI. XXX should respect PMUSERENR.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Unimplemented, RAZ/WI. XXX PMUSERENR */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .readfn = pmreg_read, .writefn = pmxevtyper_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. XXX PMUSERENR */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
    { .name = "CCSIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
static int teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)

static int teehbr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t *value)
    /* This is a helper function because the user access rights
     * depend on the value of the TEECR.
     */
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
    *value = env->teehbr;

static int teehbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .resetvalue = 0, .raw_readfn = raw_read, .raw_writefn = raw_write,
      .readfn = teehbr_read, .writefn = teehbr_write },
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls1),
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls2),
    { .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls3),

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Dummy implementation: RAZ/WI the whole crn=14 space */
    { .name = "GENERIC_TIMER", .cp = 15, .crn = 14,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
        env->cp15.c7_par = value & 0xfffff1ff;
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

/* Return true if extended addresses are enabled, ie this is an
 * LPAE implementation and we are using the long-descriptor translation
 * table format because the TTBCR EAE bit is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
    return arm_feature(env, ARM_FEATURE_LPAE)
        && (env->cp15.c2_control & (1 << 31));

static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    target_ulong page_size;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;
    /* Other states are only available with TrustZone */
    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        par64 |= phys_addr & ~0xfffULL;
        /* We don't set the ATTR or SH fields in the PAR. */
        par64 |= (ret & 0x3f) << 1; /* FS */
        /* Note that S2WLK and FSTAGE are always zero, because we don't
         * implement virtualization and therefore there can't be a stage 2
         */
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        /* We do not set any attribute bits in the PAR */
        if (page_size == (1 << 24)
            && arm_feature(env, ARM_FEATURE_V7)) {
            env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            env->cp15.c7_par = phys_addr & 0xfffff000;
        env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                           ((ret & (1 << 12)) >> 6) |
                           ((ret & 0xf) << 1) | 1;
        env->cp15.c7_par_hi = 0;
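/* Illustrative example (not from the original file): with the short
 * descriptor format, a successful 4KB translation of VA 0x40001000 to
 * PA 0x80001234 leaves c7_par == 0x80001000 (bits [31:12] of the PA, bit 0
 * clear meaning no fault); on a failed translation bit 0 is set and the
 * FSR bits are packed into PAR[6:1] as computed above.
 */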
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
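/* Worked example (illustrative; assumes the usual loop body in which mask is
 * shifted left by 2 each iteration): extended_mpu_ap_bits(0x6) expands the
 * packed 2-bit AP fields 2 and 1 for regions 0 and 1 into nibbles, giving
 * 0x12; simple_mpu_ap_bits(0x12) packs them back down to 0x6.
 */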
static int pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
    env->cp15.c5_data = extended_mpu_ap_bits(value);

static int pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t *value)
    *value = simple_mpu_ap_bits(env->cp15.c5_data);

static int pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
    env->cp15.c5_insn = extended_mpu_ap_bits(value);

static int pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t *value)
    *value = simple_mpu_ap_bits(env->cp15.c5_insn);

static int arm946_prbs_read(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t *value)
    *value = env->cp15.c6_region[ri->crm];

static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
    env->cp15.c6_region[ri->crm] = value;
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS", .cp = 15, .crn = 6, .crm = CP_ANY, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW,
      .readfn = arm946_prbs_read, .writefn = arm946_prbs_write, },
static int vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> value);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> value);
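/* Worked example (illustrative): for short-descriptor tables with
 * TTBCR.N == 2, c2_mask becomes ~(0xffffffffu >> 2) == 0xc0000000 (VAs with
 * either of the top two bits set are translated via TTBR1) and c2_base_mask
 * becomes ~(0x3fffu >> 2) == 0xfffff000, i.e. TTBR0 must be 4KB aligned.
 */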
static int vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
    return vmsa_ttbcr_raw_write(env, ri, value);

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
    { .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_base1), .resetvalue = 0, },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_ttbcr_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
static int omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;

static int omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
    env->cp15.c15_threadid = value & 0xffff;

static int omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);

static int omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
static int xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
        env->cp15.c15_cpar = value;

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },

static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t *value)
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11])
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
static int par64_read(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
    *value = ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;

static int par64_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;

static int ttbr064_read(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t *value)
    *value = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;

static int ttbr064_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
    env->cp15.c2_base0_hi = value >> 32;
    env->cp15.c2_base0 = value;

static int ttbr064_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
    /* Writes to the 64 bit format TTBRs may change the ASID */
    return ttbr064_raw_write(env, ri, value);

static void ttbr064_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c2_base0_hi = 0;
    env->cp15.c2_base0 = 0;

static int ttbr164_read(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t *value)
    *value = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;

static int ttbr164_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
    env->cp15.c2_base1_hi = value >> 32;
    env->cp15.c2_base1 = value;

static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
    env->cp15.c2_base1_hi = 0;
    env->cp15.c2_base1 = 0;
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr064_read,
      .writefn = ttbr064_write, .raw_writefn = ttbr064_raw_write,
      .resetfn = ttbr064_reset },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr164_read,
      .writefn = ttbr164_write, .resetfn = ttbr164_reset },
static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
void register_cp_regs_for_features(ARMCPU *cpu)
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */

    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement no event counters.
         */
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .readfn = pmreg_read, .writefn = pmcr_write,
            .raw_readfn = raw_read, .raw_writefn = raw_write,
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
        ARMCPRegInfo id_cp_reginfo[] = {
            /* Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             */
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid) },
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
        define_arm_cp_regs(cpu, id_cp_reginfo);

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        define_one_arm_cp_reg(cpu, &auxcr);

    /* Generic registers whose values depend on the implementation */
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        define_one_arm_cp_reg(cpu, &sctlr);
ARMCPU *cpu_arm_init(const char *cpu_model)
    oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    cpu = ARM_CPU(object_new(object_class_get_name(oc)));
    env->cpu_model_str = cpu_model;

    /* TODO this should be set centrally, once possible */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return strcmp(name_a, name_b);

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n",

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
        .cpu_fprintf = cpu_fprintf,

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     */
    int crm, opc1, opc2;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                uint32_t *key = g_new(uint32_t, 1);
                ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
                int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
                *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
                r2->opaque = opaque;
                /* Make sure reginfo passed to helpers for wildcarded regs
                 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
                 */
                /* By convention, for wildcarded registers only the first
                 * entry is used for migration; the others are marked as
                 * NO_MIGRATE so we don't try to transfer the register
                 * multiple times. Special registers (ie NOP/WFI) are
                 */
                if ((r->type & ARM_CP_SPECIAL) ||
                    ((r->crm == CP_ANY) && crm != 0) ||
                    ((r->opc1 == CP_ANY) && opc1 != 0) ||
                    ((r->opc2 == CP_ANY) && opc2 != 0)) {
                    r2->type |= ARM_CP_NO_MIGRATE;
                /* Overriding of an existing definition must be explicitly
                 */
                if (!(r->type & ARM_CP_OVERRIDE)) {
                    ARMCPRegInfo *oldreg;
                    oldreg = g_hash_table_lookup(cpu->cp_regs, key);
                    if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
                        fprintf(stderr, "Register redefined: cp=%d %d bit "
                                "crn=%d crm=%d opc1=%d opc2=%d, "
                                "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                                r2->crn, r2->crm, r2->opc1, r2->opc2,
                                oldreg->name, r2->name);
                g_hash_table_insert(cpu->cp_regs, key, r2);
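/* Illustrative note (not in the original source): for a non-wildcarded
 * 32-bit register such as FCSEIDR above (cp 15, crn 13, crm 0, opc1 0,
 * opc2 0) the three loops run exactly once and the hash key is
 * ENCODE_CP_REG(15, 0, 13, 0, 0, 0), the same encoding used to look the
 * register up again when an mcr/mrc instruction is translated.
 */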
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);

const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
    return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
    /* Helper coprocessor write function for write-ignore registers */

int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
    /* Helper coprocessor read function for read-as-zero registers */
static int bad_mode_switch(CPUARMState *env, int mode)
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
uint32_t cpsr_read(CPUARMState *env)
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
        env->QF = ((val & CPSR_Q) != 0);
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             */
            switch_mode(env, val & CPSR_M);
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;

uint32_t HELPER(uxtb16)(uint32_t x)
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
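/* Worked example (illustrative): uxtb16(0x12345678) == 0x00340078 and
 * sxtb16(0x00ff0080) == 0xffffff80; bytes 0 and 2 are extracted and
 * zero- or sign-extended into the two halfwords of the result.
 */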
uint32_t HELPER(clz)(uint32_t x)

int32_t HELPER(sdiv)(int32_t num, int32_t den)
    if (num == INT_MIN && den == -1)

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)

uint32_t HELPER(rbit)(uint32_t x)
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
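/* Worked example (illustrative): HELPER(rbit)(0x00000001) == 0x80000000.
 * The three steps above reverse byte order, then nibbles within each byte,
 * then bits within each nibble.
 */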
#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
    cpu_abort(env, "v7m_msr %d\n", reg);

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
    cpu_abort(env, "v7m_mrs %d\n", reg);

void switch_mode(CPUARMState *env, int mode)
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
    cpu_abort(env, "banked r13 write\n");

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
    cpu_abort(env, "banked r13 read\n");
1852 /* Map CPU modes onto saved register banks. */
1853 int bank_number(int mode
)
1856 case ARM_CPU_MODE_USR
:
1857 case ARM_CPU_MODE_SYS
:
1859 case ARM_CPU_MODE_SVC
:
1861 case ARM_CPU_MODE_ABT
:
1863 case ARM_CPU_MODE_UND
:
1865 case ARM_CPU_MODE_IRQ
:
1867 case ARM_CPU_MODE_FIQ
:
1870 hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode
);
1873 void switch_mode(CPUARMState
*env
, int mode
)
1878 old_mode
= env
->uncached_cpsr
& CPSR_M
;
1879 if (mode
== old_mode
)
1882 if (old_mode
== ARM_CPU_MODE_FIQ
) {
1883 memcpy (env
->fiq_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
1884 memcpy (env
->regs
+ 8, env
->usr_regs
, 5 * sizeof(uint32_t));
1885 } else if (mode
== ARM_CPU_MODE_FIQ
) {
1886 memcpy (env
->usr_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
1887 memcpy (env
->regs
+ 8, env
->fiq_regs
, 5 * sizeof(uint32_t));
1890 i
= bank_number(old_mode
);
1891 env
->banked_r13
[i
] = env
->regs
[13];
1892 env
->banked_r14
[i
] = env
->regs
[14];
1893 env
->banked_spsr
[i
] = env
->spsr
;
1895 i
= bank_number(mode
);
1896 env
->regs
[13] = env
->banked_r13
[i
];
1897 env
->regs
[14] = env
->banked_r14
[i
];
1898 env
->spsr
= env
->banked_spsr
[i
];
1901 static void v7m_push(CPUARMState
*env
, uint32_t val
)
1904 stl_phys(env
->regs
[13], val
);
1907 static uint32_t v7m_pop(CPUARMState
*env
)
1910 val
= ldl_phys(env
->regs
[13]);
1915 /* Switch to V7M main or process stack pointer. */
1916 static void switch_v7m_sp(CPUARMState
*env
, int process
)
1919 if (env
->v7m
.current_sp
!= process
) {
1920 tmp
= env
->v7m
.other_sp
;
1921 env
->v7m
.other_sp
= env
->regs
[13];
1922 env
->regs
[13] = tmp
;
1923 env
->v7m
.current_sp
= process
;
1927 static void do_v7m_exception_exit(CPUARMState
*env
)
1932 type
= env
->regs
[15];
1933 if (env
->v7m
.exception
!= 0)
1934 armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
);
1936 /* Switch to the target stack. */
1937 switch_v7m_sp(env
, (type
& 4) != 0);
1938 /* Pop registers. */
1939 env
->regs
[0] = v7m_pop(env
);
1940 env
->regs
[1] = v7m_pop(env
);
1941 env
->regs
[2] = v7m_pop(env
);
1942 env
->regs
[3] = v7m_pop(env
);
1943 env
->regs
[12] = v7m_pop(env
);
1944 env
->regs
[14] = v7m_pop(env
);
1945 env
->regs
[15] = v7m_pop(env
);
1946 xpsr
= v7m_pop(env
);
1947 xpsr_write(env
, xpsr
, 0xfffffdff);
1948 /* Undo stack alignment. */
1951 /* ??? The exception return type specifies Thread/Handler mode. However
1952 this is also implied by the xPSR value. Not sure what to do
1953 if there is a mismatch. */
1954 /* ??? Likewise for mismatches between the CONTROL register and the stack
1958 void arm_v7m_cpu_do_interrupt(CPUState
*cs
)
1960 ARMCPU
*cpu
= ARM_CPU(cs
);
1961 CPUARMState
*env
= &cpu
->env
;
1962 uint32_t xpsr
= xpsr_read(env
);
1967 if (env
->v7m
.current_sp
)
1969 if (env
->v7m
.exception
== 0)
1972 /* For exceptions we just mark as pending on the NVIC, and let that
1974 /* TODO: Need to escalate if the current priority is higher than the
1975 one we're raising. */
1976 switch (env
->exception_index
) {
1978 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
);
1981 /* The PC already points to the next instruction. */
1982 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SVC
);
1984 case EXCP_PREFETCH_ABORT
:
1985 case EXCP_DATA_ABORT
:
1986 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_MEM
);
1989 if (semihosting_enabled
) {
1991 nr
= arm_lduw_code(env
, env
->regs
[15], env
->bswap_code
) & 0xff;
1994 env
->regs
[0] = do_arm_semihosting(env
);
1998 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_DEBUG
);
2001 env
->v7m
.exception
= armv7m_nvic_acknowledge_irq(env
->nvic
);
2003 case EXCP_EXCEPTION_EXIT
:
2004 do_v7m_exception_exit(env
);
2007 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
2008 return; /* Never happens. Keep compiler happy. */
2011 /* Align stack pointer. */
2012 /* ??? Should only do this if Configuration Control Register
2013 STACKALIGN bit is set. */
2014 if (env
->regs
[13] & 4) {
2018 /* Switch to the handler mode. */
2019 v7m_push(env
, xpsr
);
2020 v7m_push(env
, env
->regs
[15]);
2021 v7m_push(env
, env
->regs
[14]);
2022 v7m_push(env
, env
->regs
[12]);
2023 v7m_push(env
, env
->regs
[3]);
2024 v7m_push(env
, env
->regs
[2]);
2025 v7m_push(env
, env
->regs
[1]);
2026 v7m_push(env
, env
->regs
[0]);
2027 switch_v7m_sp(env
, 0);
2029 env
->condexec_bits
= 0;
2031 addr
= ldl_phys(env
->v7m
.vecbase
+ env
->v7m
.exception
* 4);
2032 env
->regs
[15] = addr
& 0xfffffffe;
2033 env
->thumb
= addr
& 1;
2036 /* Handle a CPU exception. */
2037 void arm_cpu_do_interrupt(CPUState
*cs
)
2039 ARMCPU
*cpu
= ARM_CPU(cs
);
2040 CPUARMState
*env
= &cpu
->env
;
2048 /* TODO: Vectored interrupt controller. */
2049 switch (env
->exception_index
) {
2051 new_mode
= ARM_CPU_MODE_UND
;
2060 if (semihosting_enabled
) {
2061 /* Check for semihosting interrupt. */
2063 mask
= arm_lduw_code(env
, env
->regs
[15] - 2, env
->bswap_code
)
2066 mask
= arm_ldl_code(env
, env
->regs
[15] - 4, env
->bswap_code
)
2069 /* Only intercept calls from privileged modes, to provide some
2070 semblance of security. */
2071 if (((mask
== 0x123456 && !env
->thumb
)
2072 || (mask
== 0xab && env
->thumb
))
2073 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
2074 env
->regs
[0] = do_arm_semihosting(env
);
2078 new_mode
= ARM_CPU_MODE_SVC
;
2081 /* The PC already points to the next instruction. */
2085 /* See if this is a semihosting syscall. */
2086 if (env
->thumb
&& semihosting_enabled
) {
2087 mask
= arm_lduw_code(env
, env
->regs
[15], env
->bswap_code
) & 0xff;
2089 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
2091 env
->regs
[0] = do_arm_semihosting(env
);
2095 env
->cp15
.c5_insn
= 2;
2096 /* Fall through to prefetch abort. */
2097 case EXCP_PREFETCH_ABORT
:
2098 new_mode
= ARM_CPU_MODE_ABT
;
2100 mask
= CPSR_A
| CPSR_I
;
2103 case EXCP_DATA_ABORT
:
2104 new_mode
= ARM_CPU_MODE_ABT
;
2106 mask
= CPSR_A
| CPSR_I
;
2110 new_mode
= ARM_CPU_MODE_IRQ
;
2112 /* Disable IRQ and imprecise data aborts. */
2113 mask
= CPSR_A
| CPSR_I
;
2117 new_mode
= ARM_CPU_MODE_FIQ
;
2119 /* Disable FIQ, IRQ and imprecise data aborts. */
2120 mask
= CPSR_A
| CPSR_I
| CPSR_F
;
2124 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
2125 return; /* Never happens. Keep compiler happy. */
2128 if (env
->cp15
.c1_sys
& (1 << 13)) {
2131 switch_mode (env
, new_mode
);
2132 env
->spsr
= cpsr_read(env
);
2133 /* Clear IT bits. */
2134 env
->condexec_bits
= 0;
2135 /* Switch to the new mode, and to the correct instruction set. */
2136 env
->uncached_cpsr
= (env
->uncached_cpsr
& ~CPSR_M
) | new_mode
;
2137 env
->uncached_cpsr
|= mask
;
2138 /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
2139 * and we should just guard the thumb mode on V4 */
2140 if (arm_feature(env
, ARM_FEATURE_V4T
)) {
2141 env
->thumb
= (env
->cp15
.c1_sys
& (1 << 30)) != 0;
2143 env
->regs
[14] = env
->regs
[15] + offset
;
2144 env
->regs
[15] = addr
;
2145 cs
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
2148 /* Check section/page access permissions.
2149 Returns the page protection flags, or zero if the access is not
2151 static inline int check_ap(CPUARMState
*env
, int ap
, int domain_prot
,
2152 int access_type
, int is_user
)
2156 if (domain_prot
== 3) {
2157 return PAGE_READ
| PAGE_WRITE
;
2160 if (access_type
== 1)
2163 prot_ro
= PAGE_READ
;
2167 if (access_type
== 1)
2169 switch ((env
->cp15
.c1_sys
>> 8) & 3) {
2171 return is_user
? 0 : PAGE_READ
;
2178 return is_user
? 0 : PAGE_READ
| PAGE_WRITE
;
2183 return PAGE_READ
| PAGE_WRITE
;
2185 return PAGE_READ
| PAGE_WRITE
;
2186 case 4: /* Reserved. */
2189 return is_user
? 0 : prot_ro
;
2193 if (!arm_feature (env
, ARM_FEATURE_V6K
))
2201 static uint32_t get_level1_table_address(CPUARMState
*env
, uint32_t address
)
2205 if (address
& env
->cp15
.c2_mask
)
2206 table
= env
->cp15
.c2_base1
& 0xffffc000;
2208 table
= env
->cp15
.c2_base0
& env
->cp15
.c2_base_mask
;
2210 table
|= (address
>> 18) & 0x3ffc;
2214 static int get_phys_addr_v5(CPUARMState
*env
, uint32_t address
, int access_type
,
2215 int is_user
, hwaddr
*phys_ptr
,
2216 int *prot
, target_ulong
*page_size
)
2227 /* Pagetable walk. */
2228 /* Lookup l1 descriptor. */
2229 table
= get_level1_table_address(env
, address
);
2230 desc
= ldl_phys(table
);
2232 domain
= (desc
>> 5) & 0x0f;
2233 domain_prot
= (env
->cp15
.c3
>> (domain
* 2)) & 3;
2235 /* Section translation fault. */
2239 if (domain_prot
== 0 || domain_prot
== 2) {
2241 code
= 9; /* Section domain fault. */
2243 code
= 11; /* Page domain fault. */
2248 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
2249 ap
= (desc
>> 10) & 3;
2251 *page_size
= 1024 * 1024;
2253 /* Lookup l2 entry. */
2255 /* Coarse pagetable. */
2256 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
2258 /* Fine pagetable. */
2259 table
= (desc
& 0xfffff000) | ((address
>> 8) & 0xffc);
2261 desc
= ldl_phys(table
);
2263 case 0: /* Page translation fault. */
2266 case 1: /* 64k page. */
2267 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
2268 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
2269 *page_size
= 0x10000;
2271 case 2: /* 4k page. */
2272 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
2273 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
2274 *page_size
= 0x1000;
2276 case 3: /* 1k page. */
2278 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
2279 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
2281 /* Page translation fault. */
2286 phys_addr
= (desc
& 0xfffffc00) | (address
& 0x3ff);
2288 ap
= (desc
>> 4) & 3;
2292 /* Never happens, but compiler isn't smart enough to tell. */
2297 *prot
= check_ap(env
, ap
, domain_prot
, access_type
, is_user
);
2299 /* Access permission fault. */
2303 *phys_ptr
= phys_addr
;
2306 return code
| (domain
<< 4);
2309 static int get_phys_addr_v6(CPUARMState
*env
, uint32_t address
, int access_type
,
2310 int is_user
, hwaddr
*phys_ptr
,
2311 int *prot
, target_ulong
*page_size
)
2324 /* Pagetable walk. */
2325 /* Lookup l1 descriptor. */
2326 table
= get_level1_table_address(env
, address
);
2327 desc
= ldl_phys(table
);
2329 if (type
== 0 || (type
== 3 && !arm_feature(env
, ARM_FEATURE_PXN
))) {
2330 /* Section translation fault, or attempt to use the encoding
2331 * which is Reserved on implementations without PXN.
2336 if ((type
== 1) || !(desc
& (1 << 18))) {
2337 /* Page or Section. */
2338 domain
= (desc
>> 5) & 0x0f;
2340 domain_prot
= (env
->cp15
.c3
>> (domain
* 2)) & 3;
2341 if (domain_prot
== 0 || domain_prot
== 2) {
2343 code
= 9; /* Section domain fault. */
2345 code
= 11; /* Page domain fault. */
2350 if (desc
& (1 << 18)) {
2352 phys_addr
= (desc
& 0xff000000) | (address
& 0x00ffffff);
2353 *page_size
= 0x1000000;
2356 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
2357 *page_size
= 0x100000;
2359 ap
= ((desc
>> 10) & 3) | ((desc
>> 13) & 4);
2360 xn
= desc
& (1 << 4);
2364 if (arm_feature(env
, ARM_FEATURE_PXN
)) {
2365 pxn
= (desc
>> 2) & 1;
2367 /* Lookup l2 entry. */
2368 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
2369 desc
= ldl_phys(table
);
2370 ap
= ((desc
>> 4) & 3) | ((desc
>> 7) & 4);
2372 case 0: /* Page translation fault. */
2375 case 1: /* 64k page. */
2376 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
2377 xn
= desc
& (1 << 15);
2378 *page_size
= 0x10000;
2380 case 2: case 3: /* 4k page. */
2381 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
2383 *page_size
= 0x1000;
2386 /* Never happens, but compiler isn't smart enough to tell. */
2391 if (domain_prot
== 3) {
2392 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
2394 if (pxn
&& !is_user
) {
2397 if (xn
&& access_type
== 2)
2400 /* The simplified model uses AP[0] as an access control bit. */
2401 if ((env
->cp15
.c1_sys
& (1 << 29)) && (ap
& 1) == 0) {
2402 /* Access flag fault. */
2403 code
= (code
== 15) ? 6 : 3;
2406 *prot
= check_ap(env
, ap
, domain_prot
, access_type
, is_user
);
2408 /* Access permission fault. */
2415 *phys_ptr
= phys_addr
;
2418 return code
| (domain
<< 4);
2421 /* Fault type for long-descriptor MMU fault reporting; this corresponds
2422 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
2425 translation_fault
= 1,
2427 permission_fault
= 3,
2430 static int get_phys_addr_lpae(CPUARMState
*env
, uint32_t address
,
2431 int access_type
, int is_user
,
2432 hwaddr
*phys_ptr
, int *prot
,
2433 target_ulong
*page_size_ptr
)
2435 /* Read an LPAE long-descriptor translation table. */
2436 MMUFaultType fault_type
= translation_fault
;
2444 uint32_t tableattrs
;
2445 target_ulong page_size
;
2448 /* Determine whether this address is in the region controlled by
2449 * TTBR0 or TTBR1 (or if it is in neither region and should fault).
2450 * This is a Non-secure PL0/1 stage 1 translation, so controlled by
2451 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
2453 uint32_t t0sz
= extract32(env
->cp15
.c2_control
, 0, 3);
2454 uint32_t t1sz
= extract32(env
->cp15
.c2_control
, 16, 3);
2455 if (t0sz
&& !extract32(address
, 32 - t0sz
, t0sz
)) {
2456 /* there is a ttbr0 region and we are in it (high bits all zero) */
2458 } else if (t1sz
&& !extract32(~address
, 32 - t1sz
, t1sz
)) {
2459 /* there is a ttbr1 region and we are in it (high bits all one) */
2462 /* ttbr0 region is "everything not in the ttbr1 region" */
2465 /* ttbr1 region is "everything not in the ttbr0 region" */
2468 /* in the gap between the two regions, this is a Translation fault */
2469 fault_type
= translation_fault
;
2473 /* Note that QEMU ignores shareability and cacheability attributes,
2474 * so we don't need to do anything with the SH, ORGN, IRGN fields
2475 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
2476 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
2477 * implement any ASID-like capability so we can ignore it (instead
2478 * we will always flush the TLB any time the ASID is changed).
2480 if (ttbr_select
== 0) {
2481 ttbr
= ((uint64_t)env
->cp15
.c2_base0_hi
<< 32) | env
->cp15
.c2_base0
;
2482 epd
= extract32(env
->cp15
.c2_control
, 7, 1);
2485 ttbr
= ((uint64_t)env
->cp15
.c2_base1_hi
<< 32) | env
->cp15
.c2_base1
;
2486 epd
= extract32(env
->cp15
.c2_control
, 23, 1);
2491 /* Translation table walk disabled => Translation fault on TLB miss */
2495 /* If the region is small enough we will skip straight to a 2nd level
2496 * lookup. This affects the number of bits of the address used in
2497 * combination with the TTBR to find the first descriptor. ('n' here
2498 * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
2499 * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
2508 /* Clear the vaddr bits which aren't part of the within-region address,
2509 * so that we don't have to special case things when calculating the
2510 * first descriptor address.
2512 address
&= (0xffffffffU
>> tsz
);
2514 /* Now we can extract the actual base address from the TTBR */
2515 descaddr
= extract64(ttbr
, 0, 40);
2516 descaddr
&= ~((1ULL << n
) - 1);
2520 uint64_t descriptor
;
2522 descaddr
|= ((address
>> (9 * (4 - level
))) & 0xff8);
2523 descriptor
= ldq_phys(descaddr
);
2524 if (!(descriptor
& 1) ||
2525 (!(descriptor
& 2) && (level
== 3))) {
2526 /* Invalid, or the Reserved level 3 encoding */
2529 descaddr
= descriptor
& 0xfffffff000ULL
;
2531 if ((descriptor
& 2) && (level
< 3)) {
2532 /* Table entry. The top five bits are attributes which may
2533 * propagate down through lower levels of the table (and
2534 * which are all arranged so that 0 means "no effect", so
2535 * we can gather them up by ORing in the bits at each level).
2537 tableattrs
|= extract64(descriptor
, 59, 5);
2541 /* Block entry at level 1 or 2, or page entry at level 3.
2542 * These are basically the same thing, although the number
2543 * of bits we pull in from the vaddr varies.
2545 page_size
= (1 << (39 - (9 * level
)));
2546 descaddr
|= (address
& (page_size
- 1));
2547 /* Extract attributes from the descriptor and merge with table attrs */
2548 attrs
= extract64(descriptor
, 2, 10)
2549 | (extract64(descriptor
, 52, 12) << 10);
2550 attrs
|= extract32(tableattrs
, 0, 2) << 11; /* XN, PXN */
2551 attrs
|= extract32(tableattrs
, 3, 1) << 5; /* APTable[1] => AP[2] */
2552 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
2553 * means "force PL1 access only", which means forcing AP[1] to 0.
2555 if (extract32(tableattrs
, 2, 1)) {
2558 /* Since we're always in the Non-secure state, NSTable is ignored. */
2561 /* Here descaddr is the final physical address, and attributes
2564 fault_type
= access_fault
;
2565 if ((attrs
& (1 << 8)) == 0) {
2569 fault_type
= permission_fault
;
2570 if (is_user
&& !(attrs
& (1 << 4))) {
2571 /* Unprivileged access not enabled */
2574 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
2575 if (attrs
& (1 << 12) || (!is_user
&& (attrs
& (1 << 11)))) {
2577 if (access_type
== 2) {
2580 *prot
&= ~PAGE_EXEC
;
2582 if (attrs
& (1 << 5)) {
2583 /* Write access forbidden */
2584 if (access_type
== 1) {
2587 *prot
&= ~PAGE_WRITE
;
2590 *phys_ptr
= descaddr
;
2591 *page_size_ptr
= page_size
;
2595 /* Long-descriptor format IFSR/DFSR value */
2596 return (1 << 9) | (fault_type
<< 2) | level
;
2599 static int get_phys_addr_mpu(CPUARMState
*env
, uint32_t address
,
2600 int access_type
, int is_user
,
2601 hwaddr
*phys_ptr
, int *prot
)
2607 *phys_ptr
= address
;
2608 for (n
= 7; n
>= 0; n
--) {
2609 base
= env
->cp15
.c6_region
[n
];
2610 if ((base
& 1) == 0)
2612 mask
= 1 << ((base
>> 1) & 0x1f);
2613 /* Keep this shift separate from the above to avoid an
2614 (undefined) << 32. */
2615 mask
= (mask
<< 1) - 1;
2616 if (((base
^ address
) & ~mask
) == 0)
2622 if (access_type
== 2) {
2623 mask
= env
->cp15
.c5_insn
;
2625 mask
= env
->cp15
.c5_data
;
2627 mask
= (mask
>> (n
* 4)) & 0xf;
2634 *prot
= PAGE_READ
| PAGE_WRITE
;
2639 *prot
|= PAGE_WRITE
;
2642 *prot
= PAGE_READ
| PAGE_WRITE
;
2653 /* Bad permission. */
2660 /* get_phys_addr - get the physical address for this virtual address
2662 * Find the physical address corresponding to the given virtual address,
2663 * by doing a translation table walk on MMU based systems or using the
2664 * MPU state on MPU based systems.
2666 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
2667 * prot and page_size are not filled in, and the return value provides
2668 * information on why the translation aborted, in the format of a
2669 * DFSR/IFSR fault register, with the following caveats:
2670 * * we honour the short vs long DFSR format differences.
2671 * * the WnR bit is never set (the caller must do this).
2672 * * for MPU based systems we don't bother to return a full FSR format
2676 * @address: virtual address to get physical address for
2677 * @access_type: 0 for read, 1 for write, 2 for execute
2678 * @is_user: 0 for privileged access, 1 for user
2679 * @phys_ptr: set to the physical address corresponding to the virtual address
2680 * @prot: set to the permissions for the page containing phys_ptr
2681 * @page_size: set to the size of the page containing phys_ptr
2683 static inline int get_phys_addr(CPUARMState
*env
, uint32_t address
,
2684 int access_type
, int is_user
,
2685 hwaddr
*phys_ptr
, int *prot
,
2686 target_ulong
*page_size
)
2688 /* Fast Context Switch Extension. */
2689 if (address
< 0x02000000)
2690 address
+= env
->cp15
.c13_fcse
;
2692 if ((env
->cp15
.c1_sys
& 1) == 0) {
2693 /* MMU/MPU disabled. */
2694 *phys_ptr
= address
;
2695 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
2696 *page_size
= TARGET_PAGE_SIZE
;
2698 } else if (arm_feature(env
, ARM_FEATURE_MPU
)) {
2699 *page_size
= TARGET_PAGE_SIZE
;
2700 return get_phys_addr_mpu(env
, address
, access_type
, is_user
, phys_ptr
,
2702 } else if (extended_addresses_enabled(env
)) {
2703 return get_phys_addr_lpae(env
, address
, access_type
, is_user
, phys_ptr
,
2705 } else if (env
->cp15
.c1_sys
& (1 << 23)) {
2706 return get_phys_addr_v6(env
, address
, access_type
, is_user
, phys_ptr
,
2709 return get_phys_addr_v5(env
, address
, access_type
, is_user
, phys_ptr
,
2714 int cpu_arm_handle_mmu_fault (CPUARMState
*env
, target_ulong address
,
2715 int access_type
, int mmu_idx
)
2718 target_ulong page_size
;
2722 is_user
= mmu_idx
== MMU_USER_IDX
;
2723 ret
= get_phys_addr(env
, address
, access_type
, is_user
, &phys_addr
, &prot
,
2726 /* Map a single [sub]page. */
2727 phys_addr
&= ~(hwaddr
)0x3ff;
2728 address
&= ~(uint32_t)0x3ff;
2729 tlb_set_page (env
, address
, phys_addr
, prot
, mmu_idx
, page_size
);
2733 if (access_type
== 2) {
2734 env
->cp15
.c5_insn
= ret
;
2735 env
->cp15
.c6_insn
= address
;
2736 env
->exception_index
= EXCP_PREFETCH_ABORT
;
2738 env
->cp15
.c5_data
= ret
;
2739 if (access_type
== 1 && arm_feature(env
, ARM_FEATURE_V6
))
2740 env
->cp15
.c5_data
|= (1 << 11);
2741 env
->cp15
.c6_data
= address
;
2742 env
->exception_index
= EXCP_DATA_ABORT
;
2747 hwaddr
cpu_get_phys_page_debug(CPUARMState
*env
, target_ulong addr
)
2750 target_ulong page_size
;
2754 ret
= get_phys_addr(env
, addr
, 0, 0, &phys_addr
, &prot
, &page_size
);
2762 void HELPER(set_r13_banked
)(CPUARMState
*env
, uint32_t mode
, uint32_t val
)
2764 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
2765 env
->regs
[13] = val
;
2767 env
->banked_r13
[bank_number(mode
)] = val
;
2771 uint32_t HELPER(get_r13_banked
)(CPUARMState
*env
, uint32_t mode
)
2773 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
2774 return env
->regs
[13];
2776 return env
->banked_r13
[bank_number(mode
)];
2780 uint32_t HELPER(v7m_mrs
)(CPUARMState
*env
, uint32_t reg
)
2784 return xpsr_read(env
) & 0xf8000000;
2786 return xpsr_read(env
) & 0xf80001ff;
2788 return xpsr_read(env
) & 0xff00fc00;
2790 return xpsr_read(env
) & 0xff00fdff;
2792 return xpsr_read(env
) & 0x000001ff;
2794 return xpsr_read(env
) & 0x0700fc00;
2796 return xpsr_read(env
) & 0x0700edff;
2798 return env
->v7m
.current_sp
? env
->v7m
.other_sp
: env
->regs
[13];
2800 return env
->v7m
.current_sp
? env
->regs
[13] : env
->v7m
.other_sp
;
2801 case 16: /* PRIMASK */
2802 return (env
->uncached_cpsr
& CPSR_I
) != 0;
2803 case 17: /* BASEPRI */
2804 case 18: /* BASEPRI_MAX */
2805 return env
->v7m
.basepri
;
2806 case 19: /* FAULTMASK */
2807 return (env
->uncached_cpsr
& CPSR_F
) != 0;
2808 case 20: /* CONTROL */
2809 return env
->v7m
.control
;
2811 /* ??? For debugging only. */
2812 cpu_abort(env
, "Unimplemented system register read (%d)\n", reg
);
2817 void HELPER(v7m_msr
)(CPUARMState
*env
, uint32_t reg
, uint32_t val
)
2821 xpsr_write(env
, val
, 0xf8000000);
2824 xpsr_write(env
, val
, 0xf8000000);
2827 xpsr_write(env
, val
, 0xfe00fc00);
2830 xpsr_write(env
, val
, 0xfe00fc00);
2833 /* IPSR bits are readonly. */
2836 xpsr_write(env
, val
, 0x0600fc00);
2839 xpsr_write(env
, val
, 0x0600fc00);
2842 if (env
->v7m
.current_sp
)
2843 env
->v7m
.other_sp
= val
;
2845 env
->regs
[13] = val
;
2848 if (env
->v7m
.current_sp
)
2849 env
->regs
[13] = val
;
2851 env
->v7m
.other_sp
= val
;
2853 case 16: /* PRIMASK */
2855 env
->uncached_cpsr
|= CPSR_I
;
2857 env
->uncached_cpsr
&= ~CPSR_I
;
2859 case 17: /* BASEPRI */
2860 env
->v7m
.basepri
= val
& 0xff;
2862 case 18: /* BASEPRI_MAX */
2864 if (val
!= 0 && (val
< env
->v7m
.basepri
|| env
->v7m
.basepri
== 0))
2865 env
->v7m
.basepri
= val
;
2867 case 19: /* FAULTMASK */
2869 env
->uncached_cpsr
|= CPSR_F
;
2871 env
->uncached_cpsr
&= ~CPSR_F
;
2873 case 20: /* CONTROL */
2874 env
->v7m
.control
= val
& 3;
2875 switch_v7m_sp(env
, (val
& 2) != 0);
2878 /* ??? For debugging only. */
2879 cpu_abort(env
, "Unimplemented system register write (%d)\n", reg
);
2886 /* Note that signed overflow is undefined in C. The following routines are
2887 careful to use unsigned types where modulo arithmetic is required.
2888 Failure to do so _will_ break on newer gcc. */
2890 /* Signed saturating arithmetic. */
2892 /* Perform 16-bit signed saturating addition. */
2893 static inline uint16_t add16_sat(uint16_t a
, uint16_t b
)
2898 if (((res
^ a
) & 0x8000) && !((a
^ b
) & 0x8000)) {
2907 /* Perform 8-bit signed saturating addition. */
2908 static inline uint8_t add8_sat(uint8_t a
, uint8_t b
)
2913 if (((res
^ a
) & 0x80) && !((a
^ b
) & 0x80)) {
2922 /* Perform 16-bit signed saturating subtraction. */
2923 static inline uint16_t sub16_sat(uint16_t a
, uint16_t b
)
2928 if (((res
^ a
) & 0x8000) && ((a
^ b
) & 0x8000)) {
2937 /* Perform 8-bit signed saturating subtraction. */
2938 static inline uint8_t sub8_sat(uint8_t a
, uint8_t b
)
2943 if (((res
^ a
) & 0x80) && ((a
^ b
) & 0x80)) {
2952 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2953 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2954 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
2955 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
2958 #include "op_addsub.h"
2960 /* Unsigned saturating arithmetic. */
2961 static inline uint16_t add16_usat(uint16_t a
, uint16_t b
)
2970 static inline uint16_t sub16_usat(uint16_t a
, uint16_t b
)
2978 static inline uint8_t add8_usat(uint8_t a
, uint8_t b
)
2987 static inline uint8_t sub8_usat(uint8_t a
, uint8_t b
)
2995 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2996 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2997 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
2998 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
3001 #include "op_addsub.h"
3003 /* Signed modulo arithmetic. */
3004 #define SARITH16(a, b, n, op) do { \
3006 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
3007 RESULT(sum, n, 16); \
3009 ge |= 3 << (n * 2); \
3012 #define SARITH8(a, b, n, op) do { \
3014 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
3015 RESULT(sum, n, 8); \
3021 #define ADD16(a, b, n) SARITH16(a, b, n, +)
3022 #define SUB16(a, b, n) SARITH16(a, b, n, -)
3023 #define ADD8(a, b, n) SARITH8(a, b, n, +)
3024 #define SUB8(a, b, n) SARITH8(a, b, n, -)
3028 #include "op_addsub.h"
3030 /* Unsigned modulo arithmetic. */
3031 #define ADD16(a, b, n) do { \
3033 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
3034 RESULT(sum, n, 16); \
3035 if ((sum >> 16) == 1) \
3036 ge |= 3 << (n * 2); \
3039 #define ADD8(a, b, n) do { \
3041 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
3042 RESULT(sum, n, 8); \
3043 if ((sum >> 8) == 1) \
3047 #define SUB16(a, b, n) do { \
3049 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
3050 RESULT(sum, n, 16); \
3051 if ((sum >> 16) == 0) \
3052 ge |= 3 << (n * 2); \
3055 #define SUB8(a, b, n) do { \
3057 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
3058 RESULT(sum, n, 8); \
3059 if ((sum >> 8) == 0) \
3066 #include "op_addsub.h"
3068 /* Halved signed arithmetic. */
3069 #define ADD16(a, b, n) \
3070 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
3071 #define SUB16(a, b, n) \
3072 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
3073 #define ADD8(a, b, n) \
3074 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
3075 #define SUB8(a, b, n) \
3076 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
3079 #include "op_addsub.h"
3081 /* Halved unsigned arithmetic. */
3082 #define ADD16(a, b, n) \
3083 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
3084 #define SUB16(a, b, n) \
3085 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
3086 #define ADD8(a, b, n) \
3087 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
3088 #define SUB8(a, b, n) \
3089 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
3092 #include "op_addsub.h"
3094 static inline uint8_t do_usad(uint8_t a
, uint8_t b
)
3102 /* Unsigned sum of absolute byte differences. */
3103 uint32_t HELPER(usad8
)(uint32_t a
, uint32_t b
)
3106 sum
= do_usad(a
, b
);
3107 sum
+= do_usad(a
>> 8, b
>> 8);
3108 sum
+= do_usad(a
>> 16, b
>>16);
3109 sum
+= do_usad(a
>> 24, b
>> 24);
3113 /* For ARMv6 SEL instruction. */
3114 uint32_t HELPER(sel_flags
)(uint32_t flags
, uint32_t a
, uint32_t b
)
3127 return (a
& mask
) | (b
& ~mask
);
3130 /* VFP support. We follow the convention used for VFP instructions:
3131 Single precision routines have a "s" suffix, double precision a
3134 /* Convert host exception flags to vfp form. */
3135 static inline int vfp_exceptbits_from_host(int host_bits
)
3137 int target_bits
= 0;
3139 if (host_bits
& float_flag_invalid
)
3141 if (host_bits
& float_flag_divbyzero
)
3143 if (host_bits
& float_flag_overflow
)
3145 if (host_bits
& (float_flag_underflow
| float_flag_output_denormal
))
3147 if (host_bits
& float_flag_inexact
)
3148 target_bits
|= 0x10;
3149 if (host_bits
& float_flag_input_denormal
)
3150 target_bits
|= 0x80;
3154 uint32_t HELPER(vfp_get_fpscr
)(CPUARMState
*env
)
3159 fpscr
= (env
->vfp
.xregs
[ARM_VFP_FPSCR
] & 0xffc8ffff)
3160 | (env
->vfp
.vec_len
<< 16)
3161 | (env
->vfp
.vec_stride
<< 20);
3162 i
= get_float_exception_flags(&env
->vfp
.fp_status
);
3163 i
|= get_float_exception_flags(&env
->vfp
.standard_fp_status
);
3164 fpscr
|= vfp_exceptbits_from_host(i
);
3168 uint32_t vfp_get_fpscr(CPUARMState
*env
)
3170 return HELPER(vfp_get_fpscr
)(env
);
3173 /* Convert vfp exception flags to target form. */
3174 static inline int vfp_exceptbits_to_host(int target_bits
)
3178 if (target_bits
& 1)
3179 host_bits
|= float_flag_invalid
;
3180 if (target_bits
& 2)
3181 host_bits
|= float_flag_divbyzero
;
3182 if (target_bits
& 4)
3183 host_bits
|= float_flag_overflow
;
3184 if (target_bits
& 8)
3185 host_bits
|= float_flag_underflow
;
3186 if (target_bits
& 0x10)
3187 host_bits
|= float_flag_inexact
;
3188 if (target_bits
& 0x80)
3189 host_bits
|= float_flag_input_denormal
;
3193 void HELPER(vfp_set_fpscr
)(CPUARMState
*env
, uint32_t val
)
3198 changed
= env
->vfp
.xregs
[ARM_VFP_FPSCR
];
3199 env
->vfp
.xregs
[ARM_VFP_FPSCR
] = (val
& 0xffc8ffff);
3200 env
->vfp
.vec_len
= (val
>> 16) & 7;
3201 env
->vfp
.vec_stride
= (val
>> 20) & 3;
3204 if (changed
& (3 << 22)) {
3205 i
= (val
>> 22) & 3;
3208 i
= float_round_nearest_even
;
3214 i
= float_round_down
;
3217 i
= float_round_to_zero
;
3220 set_float_rounding_mode(i
, &env
->vfp
.fp_status
);
3222 if (changed
& (1 << 24)) {
3223 set_flush_to_zero((val
& (1 << 24)) != 0, &env
->vfp
.fp_status
);
3224 set_flush_inputs_to_zero((val
& (1 << 24)) != 0, &env
->vfp
.fp_status
);
3226 if (changed
& (1 << 25))
3227 set_default_nan_mode((val
& (1 << 25)) != 0, &env
->vfp
.fp_status
);
3229 i
= vfp_exceptbits_to_host(val
);
3230 set_float_exception_flags(i
, &env
->vfp
.fp_status
);
3231 set_float_exception_flags(0, &env
->vfp
.standard_fp_status
);
3234 void vfp_set_fpscr(CPUARMState
*env
, uint32_t val
)
3236 HELPER(vfp_set_fpscr
)(env
, val
);
3239 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
3241 #define VFP_BINOP(name) \
3242 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
3244 float_status *fpst = fpstp; \
3245 return float32_ ## name(a, b, fpst); \
3247 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
3249 float_status *fpst = fpstp; \
3250 return float64_ ## name(a, b, fpst); \
3258 float32
VFP_HELPER(neg
, s
)(float32 a
)
3260 return float32_chs(a
);
3263 float64
VFP_HELPER(neg
, d
)(float64 a
)
3265 return float64_chs(a
);
3268 float32
VFP_HELPER(abs
, s
)(float32 a
)
3270 return float32_abs(a
);
3273 float64
VFP_HELPER(abs
, d
)(float64 a
)
3275 return float64_abs(a
);
3278 float32
VFP_HELPER(sqrt
, s
)(float32 a
, CPUARMState
*env
)
3280 return float32_sqrt(a
, &env
->vfp
.fp_status
);
3283 float64
VFP_HELPER(sqrt
, d
)(float64 a
, CPUARMState
*env
)
3285 return float64_sqrt(a
, &env
->vfp
.fp_status
);
3288 /* XXX: check quiet/signaling case */
3289 #define DO_VFP_cmp(p, type) \
3290 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
3293 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
3294 case 0: flags = 0x6; break; \
3295 case -1: flags = 0x8; break; \
3296 case 1: flags = 0x2; break; \
3297 default: case 2: flags = 0x3; break; \
3299 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
3300 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
3302 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
3305 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
3306 case 0: flags = 0x6; break; \
3307 case -1: flags = 0x8; break; \
3308 case 1: flags = 0x2; break; \
3309 default: case 2: flags = 0x3; break; \
3311 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
3312 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
3314 DO_VFP_cmp(s
, float32
)
3315 DO_VFP_cmp(d
, float64
)
3318 /* Integer to float and float to integer conversions */
3320 #define CONV_ITOF(name, fsz, sign) \
3321 float##fsz HELPER(name)(uint32_t x, void *fpstp) \
3323 float_status *fpst = fpstp; \
3324 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
3327 #define CONV_FTOI(name, fsz, sign, round) \
3328 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
3330 float_status *fpst = fpstp; \
3331 if (float##fsz##_is_any_nan(x)) { \
3332 float_raise(float_flag_invalid, fpst); \
3335 return float##fsz##_to_##sign##int32##round(x, fpst); \
3338 #define FLOAT_CONVS(name, p, fsz, sign) \
3339 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
3340 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
3341 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
3343 FLOAT_CONVS(si
, s
, 32, )
3344 FLOAT_CONVS(si
, d
, 64, )
3345 FLOAT_CONVS(ui
, s
, 32, u
)
3346 FLOAT_CONVS(ui
, d
, 64, u
)
3352 /* floating point conversion */
3353 float64
VFP_HELPER(fcvtd
, s
)(float32 x
, CPUARMState
*env
)
3355 float64 r
= float32_to_float64(x
, &env
->vfp
.fp_status
);
3356 /* ARM requires that S<->D conversion of any kind of NaN generates
3357 * a quiet NaN by forcing the most significant frac bit to 1.
3359 return float64_maybe_silence_nan(r
);
3362 float32
VFP_HELPER(fcvts
, d
)(float64 x
, CPUARMState
*env
)
3364 float32 r
= float64_to_float32(x
, &env
->vfp
.fp_status
);
3365 /* ARM requires that S<->D conversion of any kind of NaN generates
3366 * a quiet NaN by forcing the most significant frac bit to 1.
3368 return float32_maybe_silence_nan(r
);
3371 /* VFP3 fixed point conversion. */
3372 #define VFP_CONV_FIX(name, p, fsz, itype, sign) \
3373 float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
3376 float_status *fpst = fpstp; \
3378 tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
3379 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
3381 uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
3384 float_status *fpst = fpstp; \
3386 if (float##fsz##_is_any_nan(x)) { \
3387 float_raise(float_flag_invalid, fpst); \
3390 tmp = float##fsz##_scalbn(x, shift, fpst); \
3391 return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
3394 VFP_CONV_FIX(sh
, d
, 64, int16
, )
3395 VFP_CONV_FIX(sl
, d
, 64, int32
, )
3396 VFP_CONV_FIX(uh
, d
, 64, uint16
, u
)
3397 VFP_CONV_FIX(ul
, d
, 64, uint32
, u
)
3398 VFP_CONV_FIX(sh
, s
, 32, int16
, )
3399 VFP_CONV_FIX(sl
, s
, 32, int32
, )
3400 VFP_CONV_FIX(uh
, s
, 32, uint16
, u
)
3401 VFP_CONV_FIX(ul
, s
, 32, uint32
, u
)
3404 /* Half precision conversions. */
3405 static float32
do_fcvt_f16_to_f32(uint32_t a
, CPUARMState
*env
, float_status
*s
)
3407 int ieee
= (env
->vfp
.xregs
[ARM_VFP_FPSCR
] & (1 << 26)) == 0;
3408 float32 r
= float16_to_float32(make_float16(a
), ieee
, s
);
3410 return float32_maybe_silence_nan(r
);
3415 static uint32_t do_fcvt_f32_to_f16(float32 a
, CPUARMState
*env
, float_status
*s
)
3417 int ieee
= (env
->vfp
.xregs
[ARM_VFP_FPSCR
] & (1 << 26)) == 0;
3418 float16 r
= float32_to_float16(a
, ieee
, s
);
3420 r
= float16_maybe_silence_nan(r
);
3422 return float16_val(r
);
3425 float32
HELPER(neon_fcvt_f16_to_f32
)(uint32_t a
, CPUARMState
*env
)
3427 return do_fcvt_f16_to_f32(a
, env
, &env
->vfp
.standard_fp_status
);
3430 uint32_t HELPER(neon_fcvt_f32_to_f16
)(float32 a
, CPUARMState
*env
)
3432 return do_fcvt_f32_to_f16(a
, env
, &env
->vfp
.standard_fp_status
);
3435 float32
HELPER(vfp_fcvt_f16_to_f32
)(uint32_t a
, CPUARMState
*env
)
3437 return do_fcvt_f16_to_f32(a
, env
, &env
->vfp
.fp_status
);
3440 uint32_t HELPER(vfp_fcvt_f32_to_f16
)(float32 a
, CPUARMState
*env
)
3442 return do_fcvt_f32_to_f16(a
, env
, &env
->vfp
.fp_status
);
3445 #define float32_two make_float32(0x40000000)
3446 #define float32_three make_float32(0x40400000)
3447 #define float32_one_point_five make_float32(0x3fc00000)
3449 float32
HELPER(recps_f32
)(float32 a
, float32 b
, CPUARMState
*env
)
3451 float_status
*s
= &env
->vfp
.standard_fp_status
;
3452 if ((float32_is_infinity(a
) && float32_is_zero_or_denormal(b
)) ||
3453 (float32_is_infinity(b
) && float32_is_zero_or_denormal(a
))) {
3454 if (!(float32_is_zero(a
) || float32_is_zero(b
))) {
3455 float_raise(float_flag_input_denormal
, s
);
3459 return float32_sub(float32_two
, float32_mul(a
, b
, s
), s
);
3462 float32
HELPER(rsqrts_f32
)(float32 a
, float32 b
, CPUARMState
*env
)
3464 float_status
*s
= &env
->vfp
.standard_fp_status
;
3466 if ((float32_is_infinity(a
) && float32_is_zero_or_denormal(b
)) ||
3467 (float32_is_infinity(b
) && float32_is_zero_or_denormal(a
))) {
3468 if (!(float32_is_zero(a
) || float32_is_zero(b
))) {
3469 float_raise(float_flag_input_denormal
, s
);
3471 return float32_one_point_five
;
3473 product
= float32_mul(a
, b
, s
);
3474 return float32_div(float32_sub(float32_three
, product
, s
), float32_two
, s
);
3479 /* Constants 256 and 512 are used in some helpers; we avoid relying on
3480 * int->float conversions at run-time. */
3481 #define float64_256 make_float64(0x4070000000000000LL)
3482 #define float64_512 make_float64(0x4080000000000000LL)
3484 /* The algorithm that must be used to calculate the estimate
3485 * is specified by the ARM ARM.
3487 static float64
recip_estimate(float64 a
, CPUARMState
*env
)
3489 /* These calculations mustn't set any fp exception flags,
3490 * so we use a local copy of the fp_status.
3492 float_status dummy_status
= env
->vfp
.standard_fp_status
;
3493 float_status
*s
= &dummy_status
;
3494 /* q = (int)(a * 512.0) */
3495 float64 q
= float64_mul(float64_512
, a
, s
);
3496 int64_t q_int
= float64_to_int64_round_to_zero(q
, s
);
3498 /* r = 1.0 / (((double)q + 0.5) / 512.0) */
3499 q
= int64_to_float64(q_int
, s
);
3500 q
= float64_add(q
, float64_half
, s
);
3501 q
= float64_div(q
, float64_512
, s
);
3502 q
= float64_div(float64_one
, q
, s
);
3504 /* s = (int)(256.0 * r + 0.5) */
3505 q
= float64_mul(q
, float64_256
, s
);
3506 q
= float64_add(q
, float64_half
, s
);
3507 q_int
= float64_to_int64_round_to_zero(q
, s
);
3509 /* return (double)s / 256.0 */
3510 return float64_div(int64_to_float64(q_int
, s
), float64_256
, s
);
3513 float32
HELPER(recpe_f32
)(float32 a
, CPUARMState
*env
)
3515 float_status
*s
= &env
->vfp
.standard_fp_status
;
3517 uint32_t val32
= float32_val(a
);
3520 int a_exp
= (val32
& 0x7f800000) >> 23;
3521 int sign
= val32
& 0x80000000;
3523 if (float32_is_any_nan(a
)) {
3524 if (float32_is_signaling_nan(a
)) {
3525 float_raise(float_flag_invalid
, s
);
3527 return float32_default_nan
;
3528 } else if (float32_is_infinity(a
)) {
3529 return float32_set_sign(float32_zero
, float32_is_neg(a
));
3530 } else if (float32_is_zero_or_denormal(a
)) {
3531 if (!float32_is_zero(a
)) {
3532 float_raise(float_flag_input_denormal
, s
);
3534 float_raise(float_flag_divbyzero
, s
);
3535 return float32_set_sign(float32_infinity
, float32_is_neg(a
));
3536 } else if (a_exp
>= 253) {
3537 float_raise(float_flag_underflow
, s
);
3538 return float32_set_sign(float32_zero
, float32_is_neg(a
));
3541 f64
= make_float64((0x3feULL
<< 52)
3542 | ((int64_t)(val32
& 0x7fffff) << 29));
3544 result_exp
= 253 - a_exp
;
3546 f64
= recip_estimate(f64
, env
);
3549 | ((result_exp
& 0xff) << 23)
3550 | ((float64_val(f64
) >> 29) & 0x7fffff);
3551 return make_float32(val32
);
3554 /* The algorithm that must be used to calculate the estimate
3555 * is specified by the ARM ARM.
3557 static float64
recip_sqrt_estimate(float64 a
, CPUARMState
*env
)
3559 /* These calculations mustn't set any fp exception flags,
3560 * so we use a local copy of the fp_status.
3562 float_status dummy_status
= env
->vfp
.standard_fp_status
;
3563 float_status
*s
= &dummy_status
;
3567 if (float64_lt(a
, float64_half
, s
)) {
3568 /* range 0.25 <= a < 0.5 */
3570 /* a in units of 1/512 rounded down */
3571 /* q0 = (int)(a * 512.0); */
3572 q
= float64_mul(float64_512
, a
, s
);
3573 q_int
= float64_to_int64_round_to_zero(q
, s
);
3575 /* reciprocal root r */
3576 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
3577 q
= int64_to_float64(q_int
, s
);
3578 q
= float64_add(q
, float64_half
, s
);
3579 q
= float64_div(q
, float64_512
, s
);
3580 q
= float64_sqrt(q
, s
);
3581 q
= float64_div(float64_one
, q
, s
);
3583 /* range 0.5 <= a < 1.0 */
3585 /* a in units of 1/256 rounded down */
3586 /* q1 = (int)(a * 256.0); */
3587 q
= float64_mul(float64_256
, a
, s
);
3588 int64_t q_int
= float64_to_int64_round_to_zero(q
, s
);
3590 /* reciprocal root r */
3591 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
3592 q
= int64_to_float64(q_int
, s
);
3593 q
= float64_add(q
, float64_half
, s
);
3594 q
= float64_div(q
, float64_256
, s
);
3595 q
= float64_sqrt(q
, s
);
3596 q
= float64_div(float64_one
, q
, s
);
3598 /* r in units of 1/256 rounded to nearest */
3599 /* s = (int)(256.0 * r + 0.5); */
3601 q
= float64_mul(q
, float64_256
,s
);
3602 q
= float64_add(q
, float64_half
, s
);
3603 q_int
= float64_to_int64_round_to_zero(q
, s
);
3605 /* return (double)s / 256.0;*/
3606 return float64_div(int64_to_float64(q_int
, s
), float64_256
, s
);
3609 float32
HELPER(rsqrte_f32
)(float32 a
, CPUARMState
*env
)
3611 float_status
*s
= &env
->vfp
.standard_fp_status
;
3617 val
= float32_val(a
);
3619 if (float32_is_any_nan(a
)) {
3620 if (float32_is_signaling_nan(a
)) {
3621 float_raise(float_flag_invalid
, s
);
3623 return float32_default_nan
;
3624 } else if (float32_is_zero_or_denormal(a
)) {
3625 if (!float32_is_zero(a
)) {
3626 float_raise(float_flag_input_denormal
, s
);
3628 float_raise(float_flag_divbyzero
, s
);
3629 return float32_set_sign(float32_infinity
, float32_is_neg(a
));
3630 } else if (float32_is_neg(a
)) {
3631 float_raise(float_flag_invalid
, s
);
3632 return float32_default_nan
;
3633 } else if (float32_is_infinity(a
)) {
3634 return float32_zero
;
3637 /* Normalize to a double-precision value between 0.25 and 1.0,
3638 * preserving the parity of the exponent. */
3639 if ((val
& 0x800000) == 0) {
3640 f64
= make_float64(((uint64_t)(val
& 0x80000000) << 32)
3642 | ((uint64_t)(val
& 0x7fffff) << 29));
3644 f64
= make_float64(((uint64_t)(val
& 0x80000000) << 32)
3646 | ((uint64_t)(val
& 0x7fffff) << 29));
3649 result_exp
= (380 - ((val
& 0x7f800000) >> 23)) / 2;
3651 f64
= recip_sqrt_estimate(f64
, env
);
3653 val64
= float64_val(f64
);
3655 val
= ((result_exp
& 0xff) << 23)
3656 | ((val64
>> 29) & 0x7fffff);
3657 return make_float32(val
);
3660 uint32_t HELPER(recpe_u32
)(uint32_t a
, CPUARMState
*env
)
3664 if ((a
& 0x80000000) == 0) {
3668 f64
= make_float64((0x3feULL
<< 52)
3669 | ((int64_t)(a
& 0x7fffffff) << 21));
3671 f64
= recip_estimate (f64
, env
);
3673 return 0x80000000 | ((float64_val(f64
) >> 21) & 0x7fffffff);
3676 uint32_t HELPER(rsqrte_u32
)(uint32_t a
, CPUARMState
*env
)
3680 if ((a
& 0xc0000000) == 0) {
3684 if (a
& 0x80000000) {
3685 f64
= make_float64((0x3feULL
<< 52)
3686 | ((uint64_t)(a
& 0x7fffffff) << 21));
3687 } else { /* bits 31-30 == '01' */
3688 f64
= make_float64((0x3fdULL
<< 52)
3689 | ((uint64_t)(a
& 0x3fffffff) << 22));
3692 f64
= recip_sqrt_estimate(f64
, env
);
3694 return 0x80000000 | ((float64_val(f64
) >> 21) & 0x7fffffff);
3697 /* VFPv4 fused multiply-accumulate */
3698 float32
VFP_HELPER(muladd
, s
)(float32 a
, float32 b
, float32 c
, void *fpstp
)
3700 float_status
*fpst
= fpstp
;
3701 return float32_muladd(a
, b
, c
, 0, fpst
);
3704 float64
VFP_HELPER(muladd
, d
)(float64 a
, float64 b
, float64 c
, void *fpstp
)
3706 float_status
*fpst
= fpstp
;
3707 return float64_muladd(a
, b
, c
, 0, fpst
);