#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* For crc32 */
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);
#endif

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
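/* init_cpreg_list() uses a deliberate count-then-fill pattern: count_cpreg()
 * and add_cpreg_to_list() apply the same ARM_CP_NO_MIGRATE filter, so the
 * assert above can only fire if the two passes somehow disagree about which
 * registers are migratable.
 */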
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(env, 1);
        env->cp15.c13_fcse = value;
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(env, 1);
    }
    env->cp15.c13_context = value;
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    tlb_flush(env, 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB? */
        tb_flush(env);
    }
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
#ifndef CONFIG_USER_ONLY
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Don't compute the number of ticks in user mode */
    uint32_t temp_ticks;

    temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
                 get_ticks_per_sec() / 1000000;

    if (env->cp15.c9_pmcr & PMCRE) {
        /* If the counter is enabled */
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            env->cp15.c15_ccnt = (temp_ticks / 64) - env->cp15.c15_ccnt;
        } else {
            env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
        }
    }

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    if (env->cp15.c9_pmcr & PMCRE) {
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            temp_ticks /= 64;
        }
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t total_ticks;

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
                  get_ticks_per_sec() / 1000000;

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint32_t total_ticks;

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
                  get_ticks_per_sec() / 1000000;

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
#endif
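/* Sketch of the c15_ccnt encoding used by the functions above (inferred
 * from the code; this note is not in the original source). While PMCRE is
 * set, c15_ccnt holds the delta "ticks at last update minus counter value"
 * rather than the counter itself, so:
 *
 *   read:  counter = total_ticks - c15_ccnt
 *   write: c15_ccnt = total_ticks - value
 *
 * e.g. writing 0 at total_ticks == 1000 stores 1000, and a read 500 ticks
 * later returns 1500 - 1000 = 500. While PMCRE is clear, c15_ccnt holds the
 * frozen counter value directly, which is why pmcr_write() converts between
 * the two representations whenever the E bit changes.
 */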
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    env->cp15.c12_vbar = value & ~0x1Ful;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    return cpu->ccsidr[env->cp15.c0_cssel];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c0_cssel = value & 0xf;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
#ifndef CONFIG_USER_ONLY
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .accessfn = pmreg_access },
#endif
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
      .resetvalue = 0 },
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
      .resetvalue = 0, },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     * The override is necessary because of the overly-broad TLB_LOCKDOWN
     * definition.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_VIRT);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_VIRT);
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
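/* The system counter ticks once per GTIMER_SCALE nanoseconds of
 * QEMU_CLOCK_VIRTUAL time; assuming the current 16ns definition of
 * GTIMER_SCALE this matches the 62.5MHz CNTFRQ reset value advertised
 * below ((1000 * 1000 * 1000) / GTIMER_SCALE == 62500000).
 */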
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        } else {
            nexttick *= GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}
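/* Worked example of the scheduling above (illustrative, not from the
 * original source): with count == 100 and cval == 250 the timer has not
 * fired, so istatus == 0 and nexttick == 250, giving a deadline of
 * 250 * GTIMER_SCALE ns for timer_mod(). Once count reaches cval,
 * istatus == 1 and the only future transition is the (practically
 * unreachable) wrap of the counter back to zero, hence UINT64_MAX,
 * clamped to INT64_MAX / GTIMER_SCALE to stay within QEMUTimer's
 * signed range.
 */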
static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->opc1 & 1;

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->opc1 & 1;

    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int timeridx = ri->crm & 1;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      gt_get_countervalue(env));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->crm & 1;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
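/* TVAL is a signed 32-bit downcounter view of the same state: reading
 * returns (uint32_t)(cval - count) and writing n sets cval = count + n
 * (sign-extended), so writing 100 makes the timer fire 100 ticks from
 * now, while writing a negative value makes ISTATUS assert immediately.
 */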
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = value & 3;
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
    } else {
        env->cp15.c7_par = value & 0xfffff1ff;
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

/* Return true if extended addresses are enabled, ie this is an
 * LPAE implementation and we are using the long-descriptor translation
 * table format because the TTBCR EAE bit is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_LPAE)
        && (env->cp15.c2_control & (1U << 31));
}
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (ri->opc2 & 4) {
        /* Other states are only available with TrustZone; in
         * a non-TZ implementation these registers don't exist
         * at all, which is an Uncategorized trap. This underdecoding
         * is safe because the reginfo is NO_MIGRATE.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.c7_par = phys_addr & 0xfffff000;
            }
        } else {
            env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                               ((ret & (1 << 12)) >> 6) |
                               ((ret & 0xf) << 1) | 1;
        }
        env->cp15.c7_par_hi = 0;
    }
}
#endif
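/* On the failure path above the short-format PAR packs F (bit 0) plus the
 * DFSR fault-status bits: the low four FS bits into PAR[4:1], DFSR bit 10
 * into PAR[5] and DFSR bit 12 into PAR[6] -- exactly the remapping the
 * three shift/mask terms compute.
 */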
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
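/* Round-trip example (illustrative values, not from the original source):
 * the "simple" format packs one 2-bit AP field per region, while the
 * "extended" format gives each region a 4-bit slot of which only the low
 * two bits are used. extended_mpu_ap_bits(0xe) == 0x32 (field values 2 and
 * 3 land in bits [1:0] and [5:4]) and simple_mpu_ap_bits(0x32) == 0xe again.
 */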
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_data = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_data);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_insn = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_insn);
}
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    } else {
        value &= 7;
    }
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
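/* Example of the two masks (derived from the code above, values
 * illustrative): for TTBCR.N == 2, c2_mask = ~(0xffffffffu >> 2) =
 * 0xc0000000, so any virtual address with a non-zero top two bits is
 * translated via TTBR1; c2_base_mask = ~(0x3fffu >> 2) = 0xfffff000 is the
 * alignment mask for the now 4KB-aligned TTBR0 table base. With N == 0 this
 * degenerates to the 0xffffc000 reset value installed by vmsa_ttbcr_reset().
 */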
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(env, 1);
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(env, 1);
    env->cp15.c2_control = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        tlb_flush(env, 1);
    }
    raw_write(env, ri, value);
}
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    value &= 0x3fff;
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush. */
        tb_flush(env);
        env->cp15.c15_cpar = value;
    }
}
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11]) (known as Aff1
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    return mpidr;
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
static uint64_t par64_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
}

static void par64_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;
}

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Invalidate by VA (AArch64 version) */
    uint64_t pageaddr = value << 12;
    tlb_flush_page(env, pageaddr);
}

static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    /* Invalidate by VA, all ASIDs (AArch64 version) */
    uint64_t pageaddr = value << 12;
    tlb_flush_page(env, pageaddr);
}

static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by ASID (AArch64 version) */
    int asid = extract64(value, 48, 16);
    tlb_flush(env, asid == 0);
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    /* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
     * For system mode the DZP bit here will need to be computed, not constant.
     */
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_CONST,
      .resetvalue = 0x10 },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    /* Dummy implementation of monitor debug system control register:
     * we don't support debug.
     */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(env, 1);
}

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void define_aarch64_debug_regs(ARMCPU *cpu)
{
    /* Define breakpoint and watchpoint registers. These do nothing
     * but read as written, for now.
     */
    int i;

    for (i = 0; i < 16; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
            { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
            { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
            { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values */
        ARMCPRegInfo v8_idregs[] = {
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr0 },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
        define_aarch64_debug_regs(cpu);
    }
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_cp_reginfo[] = {
            /* Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 0, .crm = 0,
              .access = PL1_R, .resetvalue = cpu->midr, .type = ARM_CP_CONST },
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        };
        define_one_arm_cp_reg(cpu, &auxcr);
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        ARMCPRegInfo cbar = {
            .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
            .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
            .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
        };
        define_one_arm_cp_reg(cpu, &cbar);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}
ARMCPU *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    ObjectClass *oc;

    oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    if (!oc) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(object_class_get_name(oc)));

    /* TODO this should be set centrally, once possible */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
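/* The register counts passed to gdb_register_coprocessor() below match the
 * XML descriptions: e.g. 34 for aarch64-fpu is the 32 V registers plus FPSR
 * and FPCR, and 19 for arm-vfp is 16 D registers plus FPSID, FPSCR and FPEXC.
 */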
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
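/* Each register ends up in the cp_regs hash table under a 32-bit key built
 * from its (cp, is64, crn, crm, opc1, opc2) encoding (or the op0..op2 sysreg
 * encoding for AArch64), so a wildcarded reginfo is expanded into one hash
 * entry per concrete encoding before insertion.
 */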
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
        /* The AArch32 view of a shared register sees the lower 32 bits
         * of a 64 bit backing field. It is not migratable as the AArch64
         * view handles that. AArch64 also handles reset.
         * We assume it is a cp15 register.
         */
        r2->cp = 15;
        r2->type |= ARM_CP_NO_MIGRATE;
        r2->resetfn = arm_cp_reset_ignore;
#ifdef HOST_WORDS_BIGENDIAN
        if (r2->fieldoffset) {
            r2->fieldoffset += sizeof(uint32_t);
        }
#endif
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         */
        if (r->cp == 0) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * NO_MIGRATE so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable.
     */
    if ((r->type & ARM_CP_SPECIAL) ||
        ((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_NO_MIGRATE;
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
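/* Typical use, as seen in the reginfo tables above: a single entry such as
 *   { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 3,
 *     .opc1 = 0, .opc2 = CP_ANY, .access = PL1_R, .type = ARM_CP_CONST }
 * is expanded by the loops below into eight registers, one for each of
 * opc2 = 0..7.
 */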
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    add_cpreg_to_hashtable(cpu, r, opaque, state,
                                           crm, opc1, opc2);
                }
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}
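/* CPSR bit layout as assembled by cpsr_read() and split by cpsr_write():
 * N=31 Z=30 C=29 V=28 Q=27, IT[1:0]=26:25, GE=19:16, IT[7:2]=15:10,
 * A/I/F=8:6, T=5, mode=4:0; hence e.g. the (env->VF & 0x80000000) >> 3
 * below, which moves the cached V flag from bit 31 into bit 28.
 */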
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * intact.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}
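/* rbit reverses the 32 bits of x in three masked-shift steps: swap the four
 * bytes, swap the nibbles within each byte, then reorder the bits within
 * each nibble; e.g. rbit(0x00000001) == 0x80000000.
 */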
uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else
/* Map CPU modes onto saved register banks.  */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
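/* The M-profile stack is full-descending: v7m_push() pre-decrements r13
 * before storing, and v7m_pop() loads and then post-increments.
 */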
static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = ENV_GET_CPU(env);

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint32_t val;

    val = ldl_phys(cs->as, env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
/* Exception names for debug logging; note that not all of these
 * precisely correspond to architectural exceptions.
 */
static const char * const excnames[] = {
    [EXCP_UDEF] = "Undefined Instruction",
    [EXCP_SWI] = "SVC",
    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
    [EXCP_DATA_ABORT] = "Data Abort",
    [EXCP_IRQ] = "IRQ",
    [EXCP_FIQ] = "FIQ",
    [EXCP_BKPT] = "Breakpoint",
    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
    [EXCP_STREX] = "QEMU intercept of STREX",
};

static inline void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
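/* v7M exception entry stacks an eight word frame: r0-r3, r12, lr, the
 * return pc and xPSR. They are pushed in reverse order below, so that r0
 * ends up at the lowest address on the full-descending stack.
 */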
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(env->exception_index);

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
/* Handle a CPU exception.  */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    assert(!IS_M(env));

    arm_log_exception(env->exception_index);

    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.c5_insn, env->cp15.c6_insn);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->cp15.c5_data, env->cp15.c6_data);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & SCTLR_V) {
        /* when enabled, base address cannot be remapped.  */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and un-secure copy. Since the cpu is always in a un-secure operation
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.c12_vbar;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->daif |= mask;
    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
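/* Summary of the short-descriptor AP encodings handled by check_ap():
 * 0 = permission fault (or legacy SCTLR.S/R behaviour pre-v7),
 * 1 = privileged RW, 2 = privileged RW / user RO, 3 = RW for all,
 * 5 = privileged RO, 6 = RO, 7 = RO (valid only from ARMv6K onwards).
 */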
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        if (access_type == 1)
            return 0;
        switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        return 0;
    }
}
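/* The short-descriptor level 1 table has 4096 word-sized entries, one per
 * 1MB of virtual address space: bits [31:20] of the VA, shifted left by
 * two, index the table, which is why ((address >> 18) & 0x3ffc) is ORed
 * into the table base below.
 */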
static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.ttbr1_el1 & 0xffffc000;
    else
        table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}
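/* Short-descriptor walk: bits [1:0] of an L1 descriptor select fault (0),
 * coarse page table (1), 1MB section (2) or fine page table (3); the L2
 * descriptors likewise select fault, 64K, 4K or 1K pages.
 */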
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = ENV_GET_CPU(env);
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(cs->as, table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = ENV_GET_CPU(env);
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(cs->as, table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;
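/* In the LPAE walk below, each translation table level resolves 9 bits of
 * virtual address into one of 512 eight-byte descriptors: that is where the
 * ((address >> (9 * (4 - level))) & 0xff8) index and the
 * (1 << (39 - 9 * level)) page/block sizes (4K, 2MB, 1GB) come from.
 */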
static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    CPUState *cs = ENV_GET_CPU(env);
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = env->cp15.ttbr0_el1;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = env->cp15.ttbr1_el1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    address &= (0xffffffffU >> tsz);

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(cs->as, descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
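/* A note on the FCSE handling in get_phys_addr() below: with the Fast
 * Context Switch Extension, virtual addresses below 32MB are relocated by
 * adding the FCSE PID before translation, so e.g. VA 0x1000 with a PID of
 * 0x04000000 is looked up as 0x04001000.
 */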
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & SCTLR_M) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0) {
        return -1;
    }

    return phys_addr;
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
    }
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
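/* Editor's sketch (not part of the upstream file): a quick sanity check of
 * the saturation boundaries above.  Overflow has occurred exactly when the
 * operands share a sign and the result has the opposite sign, which is what
 * the two XOR tests detect.  Assumes <assert.h>.
 */
#if 0
static void check_add8_sat(void)
{
    assert(add8_sat(0x7f, 0x01) == 0x7f);  /* 127 + 1 saturates to 127 */
    assert(add8_sat(0x80, 0xff) == 0x80);  /* -128 + -1 saturates to -128 */
    assert(add8_sat(0x05, 0xfb) == 0x00);  /* 5 + -5 = 0: no saturation */
}
#endif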
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res = a + b;
    return res < a ? 0xffff : res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return a > b ? a - b : 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res = a + b;
    return res < a ? 0xff : res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return a > b ? a - b : 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
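/* Editor's note (illustrative, not upstream code): the "ge" updates above
 * compute the APSR.GE bits that SEL consumes later in this file.  A 16-bit
 * lane n sets GE[2n+1:2n]; an 8-bit lane n sets GE[n].  For the unsigned
 * forms, bit 16 (or bit 8) of the widened sum is the carry out, e.g.:
 */
#if 0
uint32_t ge = 0;
uint32_t sum = (uint32_t)0x8000 + (uint32_t)0x8000;  /* lane 0: 0x10000 */
if ((sum >> 16) == 1) {
    ge |= 3 << 0;     /* carry out: lane 0 flagged as >= */
}
/* ge == 0x3 here; SEL will then pick lane 0 from its first operand */
#endif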
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;

    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
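/* Worked example (editor's note): USAD8 sums |a_i - b_i| over the four
 * byte lanes, so usad8(0x01020304, 0x04030201) = 3 + 1 + 1 + 3 = 8.
 * Assumes <assert.h>.
 */
#if 0
assert(HELPER(usad8)(0x01020304, 0x04030201) == 8);
#endif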
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
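/* Worked example (editor's note): with GE flags 0b0101 the mask above is
 * 0x00ff00ff, so SEL takes bytes 0 and 2 from a and bytes 1 and 3 from b:
 */
#if 0
assert(HELPER(sel_flags)(0x5, 0xaabbccdd, 0x11223344) == 0x11bb33dd);
#endif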
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
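/* For reference (editor's note): the target bits assembled above are the
 * FPSCR cumulative exception flags:
 *   bit 0  IOC  invalid operation      bit 3  UFC  underflow
 *   bit 1  DZC  division by zero       bit 4  IXC  inexact
 *   bit 2  OFC  overflow               bit 7  IDC  input denormal
 */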
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch (type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch (type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
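/* For reference (editor's note): the flags value written to FPSCR[31:28]
 * above is the NZCV encoding of the compare result:
 *   0x6 = ZC (equal)     0x8 = N (less than)
 *   0x2 = C (greater)    0x3 = CV (unordered)
 */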
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
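/* Worked example (editor's sketch; the helper names follow from the macro
 * expansion above, e.g. VFP_CONV_FIX(sl, s, ...) generates vfp_sltos and
 * vfp_tosls, and fpst stands for some float_status in scope): a fixed-point
 * value with shift 8 is interpreted as x / 2^8, so 0x180 converts to 1.5
 * and back.
 */
#if 0
float32 f = HELPER(vfp_sltos)(0x180, 8, fpst);  /* 384 / 256 = 1.5f */
uint32_t v = HELPER(vfp_tosls)(f, 8, fpst);     /* 1.5 * 256 = 0x180 */
#endif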
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
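/* Usage pattern (editor's sketch): callers are expected to save the
 * previous mode, run the operation, and restore it, conceptually:
 */
#if 0
uint32_t old = HELPER(set_neon_rmode)(float_round_up, env);
/* ... perform the operation under the temporary rounding mode ... */
HELPER(set_neon_rmode)(old, env);
#endif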
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env,
                                  float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env,
                                   float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee,
                                   &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
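/* Editor's note (sketch, not upstream code): VRECPS computes 2 - a*b, the
 * correction factor of one Newton-Raphson step for 1/d: given an estimate
 * x of 1/d, x' = x * (2 - d*x) roughly doubles the number of correct bits.
 * Guest code therefore chains VRECPE with VRECPS+VMUL, conceptually
 * (s assumed to be &env->vfp.standard_fp_status):
 */
#if 0
float32 x = HELPER(recpe_f32)(d, env);                /* coarse 1/d */
x = float32_mul(x, HELPER(recps_f32)(d, x, env), s);  /* one N-R step */
x = float32_mul(x, HELPER(recps_f32)(d, x, env), s);  /* refine again */
#endif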
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
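/* Editor's sketch (not upstream code): the same estimate in plain C
 * doubles, useful for seeing past the softfloat plumbing.  The helper
 * above must use softfloat so that host FP state is never disturbed.
 */
#if 0
static double recip_estimate_ref(double a)   /* caller ensures 0.5 <= a < 1 */
{
    int64_t q = (int64_t)(a * 512.0);             /* a in units of 1/512 */
    double r = 1.0 / (((double)q + 0.5) / 512.0); /* reciprocal of midpoint */
    int64_t s = (int64_t)(256.0 * r + 0.5);       /* round to nearest 1/256 */
    return (double)s / 256.0;
}
#endif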
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q1 + 0.5) / 256.0); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;  */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through: treat as round-to-nearest-even for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
static void crc_init_buffer(uint8_t *buf, uint32_t val, uint32_t bytes)
{
    memset(buf, 0, 4);

    if (bytes == 1) {
        buf[0] = val & 0xff;
    } else if (bytes == 2) {
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
    } else {
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
        buf[2] = (val >> 16) & 0xff;
        buf[3] = (val >> 24) & 0xff;
    }
}

uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    crc_init_buffer(buf, val, bytes);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    crc_init_buffer(buf, val, bytes);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
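/* Editor's note (sketch): zlib's crc32() applies the standard CRC-32
 * pre- and post-inversion internally, while the ARM CRC32 instructions are
 * defined on the raw accumulator; the XORs above cancel zlib's inversions
 * so the helpers match the architectural definition.  Example call:
 */
#if 0
uint32_t r = HELPER(crc32)(0, 'a', 1);   /* CRC32B of the single byte 0x61 */
#endif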