]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/helper.c
target-arm: Pull "add one cpreg to hashtable" into its own function
[mirror_qemu.git] / target-arm / helper.c
CommitLineData
b5ff1b31 1#include "cpu.h"
022c62cb 2#include "exec/gdbstub.h"
7b59220e 3#include "helper.h"
1de7afc9 4#include "qemu/host-utils.h"
78027bb6 5#include "sysemu/arch_init.h"
9c17d615 6#include "sysemu/sysemu.h"
1de7afc9 7#include "qemu/bitops.h"
0b03bdfc 8
4a501606
PM
9#ifndef CONFIG_USER_ONLY
10static inline int get_phys_addr(CPUARMState *env, uint32_t address,
11 int access_type, int is_user,
a8170e5e 12 hwaddr *phys_ptr, int *prot,
4a501606
PM
13 target_ulong *page_size);
14#endif
15
0ecb72a5 16static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
56aebc89
PB
17{
18 int nregs;
19
20 /* VFP data registers are always little-endian. */
21 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
22 if (reg < nregs) {
23 stfq_le_p(buf, env->vfp.regs[reg]);
24 return 8;
25 }
26 if (arm_feature(env, ARM_FEATURE_NEON)) {
27 /* Aliases for Q regs. */
28 nregs += 16;
29 if (reg < nregs) {
30 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
31 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
32 return 16;
33 }
34 }
35 switch (reg - nregs) {
36 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
37 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
38 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
39 }
40 return 0;
41}
42
0ecb72a5 43static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
56aebc89
PB
44{
45 int nregs;
46
47 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
48 if (reg < nregs) {
49 env->vfp.regs[reg] = ldfq_le_p(buf);
50 return 8;
51 }
52 if (arm_feature(env, ARM_FEATURE_NEON)) {
53 nregs += 16;
54 if (reg < nregs) {
55 env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
56 env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
57 return 16;
58 }
59 }
60 switch (reg - nregs) {
61 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
62 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
71b3c3de 63 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
56aebc89
PB
64 }
65 return 0;
66}
67
6a669427
PM
68static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
69{
70 switch (reg) {
71 case 0 ... 31:
72 /* 128 bit FP register */
73 stfq_le_p(buf, env->vfp.regs[reg * 2]);
74 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
75 return 16;
76 case 32:
77 /* FPSR */
78 stl_p(buf, vfp_get_fpsr(env));
79 return 4;
80 case 33:
81 /* FPCR */
82 stl_p(buf, vfp_get_fpcr(env));
83 return 4;
84 default:
85 return 0;
86 }
87}
88
89static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
90{
91 switch (reg) {
92 case 0 ... 31:
93 /* 128 bit FP register */
94 env->vfp.regs[reg * 2] = ldfq_le_p(buf);
95 env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
96 return 16;
97 case 32:
98 /* FPSR */
99 vfp_set_fpsr(env, ldl_p(buf));
100 return 4;
101 case 33:
102 /* FPCR */
103 vfp_set_fpcr(env, ldl_p(buf));
104 return 4;
105 default:
106 return 0;
107 }
108}
109
d4e6df63
PM
110static int raw_read(CPUARMState *env, const ARMCPRegInfo *ri,
111 uint64_t *value)
112{
22d9e1a9
PM
113 if (ri->type & ARM_CP_64BIT) {
114 *value = CPREG_FIELD64(env, ri);
115 } else {
116 *value = CPREG_FIELD32(env, ri);
117 }
d4e6df63
PM
118 return 0;
119}
120
121static int raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
122 uint64_t value)
123{
22d9e1a9
PM
124 if (ri->type & ARM_CP_64BIT) {
125 CPREG_FIELD64(env, ri) = value;
126 } else {
127 CPREG_FIELD32(env, ri) = value;
128 }
d4e6df63
PM
129 return 0;
130}
131
721fae12
PM
132static bool read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
133 uint64_t *v)
134{
135 /* Raw read of a coprocessor register (as needed for migration, etc)
136 * return true on success, false if the read is impossible for some reason.
137 */
138 if (ri->type & ARM_CP_CONST) {
139 *v = ri->resetvalue;
140 } else if (ri->raw_readfn) {
141 return (ri->raw_readfn(env, ri, v) == 0);
142 } else if (ri->readfn) {
143 return (ri->readfn(env, ri, v) == 0);
144 } else {
145 if (ri->type & ARM_CP_64BIT) {
146 *v = CPREG_FIELD64(env, ri);
147 } else {
148 *v = CPREG_FIELD32(env, ri);
149 }
150 }
151 return true;
152}
153
154static bool write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
155 int64_t v)
156{
157 /* Raw write of a coprocessor register (as needed for migration, etc).
158 * Return true on success, false if the write is impossible for some reason.
159 * Note that constant registers are treated as write-ignored; the
160 * caller should check for success by whether a readback gives the
161 * value written.
162 */
163 if (ri->type & ARM_CP_CONST) {
164 return true;
165 } else if (ri->raw_writefn) {
166 return (ri->raw_writefn(env, ri, v) == 0);
167 } else if (ri->writefn) {
168 return (ri->writefn(env, ri, v) == 0);
169 } else {
170 if (ri->type & ARM_CP_64BIT) {
171 CPREG_FIELD64(env, ri) = v;
172 } else {
173 CPREG_FIELD32(env, ri) = v;
174 }
175 }
176 return true;
177}
178
179bool write_cpustate_to_list(ARMCPU *cpu)
180{
181 /* Write the coprocessor state from cpu->env to the (index,value) list. */
182 int i;
183 bool ok = true;
184
185 for (i = 0; i < cpu->cpreg_array_len; i++) {
186 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
187 const ARMCPRegInfo *ri;
188 uint64_t v;
189 ri = get_arm_cp_reginfo(cpu, regidx);
190 if (!ri) {
191 ok = false;
192 continue;
193 }
194 if (ri->type & ARM_CP_NO_MIGRATE) {
195 continue;
196 }
197 if (!read_raw_cp_reg(&cpu->env, ri, &v)) {
198 ok = false;
199 continue;
200 }
201 cpu->cpreg_values[i] = v;
202 }
203 return ok;
204}
205
206bool write_list_to_cpustate(ARMCPU *cpu)
207{
208 int i;
209 bool ok = true;
210
211 for (i = 0; i < cpu->cpreg_array_len; i++) {
212 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
213 uint64_t v = cpu->cpreg_values[i];
214 uint64_t readback;
215 const ARMCPRegInfo *ri;
216
217 ri = get_arm_cp_reginfo(cpu, regidx);
218 if (!ri) {
219 ok = false;
220 continue;
221 }
222 if (ri->type & ARM_CP_NO_MIGRATE) {
223 continue;
224 }
225 /* Write value and confirm it reads back as written
226 * (to catch read-only registers and partially read-only
227 * registers where the incoming migration value doesn't match)
228 */
229 if (!write_raw_cp_reg(&cpu->env, ri, v) ||
230 !read_raw_cp_reg(&cpu->env, ri, &readback) ||
231 readback != v) {
232 ok = false;
233 }
234 }
235 return ok;
236}
237
238static void add_cpreg_to_list(gpointer key, gpointer opaque)
239{
240 ARMCPU *cpu = opaque;
241 uint64_t regidx;
242 const ARMCPRegInfo *ri;
243
244 regidx = *(uint32_t *)key;
245 ri = get_arm_cp_reginfo(cpu, regidx);
246
247 if (!(ri->type & ARM_CP_NO_MIGRATE)) {
248 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
249 /* The value array need not be initialized at this point */
250 cpu->cpreg_array_len++;
251 }
252}
253
254static void count_cpreg(gpointer key, gpointer opaque)
255{
256 ARMCPU *cpu = opaque;
257 uint64_t regidx;
258 const ARMCPRegInfo *ri;
259
260 regidx = *(uint32_t *)key;
261 ri = get_arm_cp_reginfo(cpu, regidx);
262
263 if (!(ri->type & ARM_CP_NO_MIGRATE)) {
264 cpu->cpreg_array_len++;
265 }
266}
267
268static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
269{
cbf239b7
AR
270 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
271 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
721fae12 272
cbf239b7
AR
273 if (aidx > bidx) {
274 return 1;
275 }
276 if (aidx < bidx) {
277 return -1;
278 }
279 return 0;
721fae12
PM
280}
281
82a3a118
PM
282static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
283{
284 GList **plist = udata;
285
286 *plist = g_list_prepend(*plist, key);
287}
288
721fae12
PM
289void init_cpreg_list(ARMCPU *cpu)
290{
291 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
292 * Note that we require cpreg_tuples[] to be sorted by key ID.
293 */
82a3a118 294 GList *keys = NULL;
721fae12
PM
295 int arraylen;
296
82a3a118
PM
297 g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);
298
721fae12
PM
299 keys = g_list_sort(keys, cpreg_key_compare);
300
301 cpu->cpreg_array_len = 0;
302
303 g_list_foreach(keys, count_cpreg, cpu);
304
305 arraylen = cpu->cpreg_array_len;
306 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
307 cpu->cpreg_values = g_new(uint64_t, arraylen);
308 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
309 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
310 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
311 cpu->cpreg_array_len = 0;
312
313 g_list_foreach(keys, add_cpreg_to_list, cpu);
314
315 assert(cpu->cpreg_array_len == arraylen);
316
317 g_list_free(keys);
318}
319
c983fe6c
PM
320static int dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
321{
322 env->cp15.c3 = value;
323 tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
324 return 0;
325}
326
08de207b
PM
327static int fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
328{
329 if (env->cp15.c13_fcse != value) {
330 /* Unlike real hardware the qemu TLB uses virtual addresses,
331 * not modified virtual addresses, so this causes a TLB flush.
332 */
333 tlb_flush(env, 1);
334 env->cp15.c13_fcse = value;
335 }
336 return 0;
337}
338static int contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
339 uint64_t value)
340{
341 if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
342 /* For VMSA (when not using the LPAE long descriptor page table
343 * format) this register includes the ASID, so do a TLB flush.
344 * For PMSA it is purely a process ID and no action is needed.
345 */
346 tlb_flush(env, 1);
347 }
348 env->cp15.c13_context = value;
349 return 0;
350}
351
d929823f
PM
352static int tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
353 uint64_t value)
354{
355 /* Invalidate all (TLBIALL) */
356 tlb_flush(env, 1);
357 return 0;
358}
359
360static int tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
361 uint64_t value)
362{
363 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
364 tlb_flush_page(env, value & TARGET_PAGE_MASK);
365 return 0;
366}
367
368static int tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
369 uint64_t value)
370{
371 /* Invalidate by ASID (TLBIASID) */
372 tlb_flush(env, value == 0);
373 return 0;
374}
375
376static int tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
377 uint64_t value)
378{
379 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
380 tlb_flush_page(env, value & TARGET_PAGE_MASK);
381 return 0;
382}
383
e9aa6c21
PM
384static const ARMCPRegInfo cp_reginfo[] = {
385 /* DBGDIDR: just RAZ. In particular this means the "debug architecture
386 * version" bits will read as a reserved value, which should cause
387 * Linux to not try to use the debug hardware.
388 */
389 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
390 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
c983fe6c
PM
391 /* MMU Domain access control / MPU write buffer control */
392 { .name = "DACR", .cp = 15,
393 .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
394 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
d4e6df63 395 .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
08de207b
PM
396 { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
397 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
d4e6df63 398 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
08de207b
PM
399 { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
400 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
d4e6df63 401 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
4fdd17dd
PM
402 /* ??? This covers not just the impdef TLB lockdown registers but also
403 * some v7VMSA registers relating to TEX remap, so it is overly broad.
404 */
405 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
406 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
d929823f
PM
407 /* MMU TLB control. Note that the wildcarding means we cover not just
408 * the unified TLB ops but also the dside/iside/inner-shareable variants.
409 */
410 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
411 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
412 .type = ARM_CP_NO_MIGRATE },
d929823f 413 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
414 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
415 .type = ARM_CP_NO_MIGRATE },
d929823f 416 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
417 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
418 .type = ARM_CP_NO_MIGRATE },
d929823f 419 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
d4e6df63
PM
420 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
421 .type = ARM_CP_NO_MIGRATE },
c4804214
PM
422 /* Cache maintenance ops; some of this space may be overridden later. */
423 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
424 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
425 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
e9aa6c21
PM
426 REGINFO_SENTINEL
427};
428
7d57f408
PM
429static const ARMCPRegInfo not_v6_cp_reginfo[] = {
430 /* Not all pre-v6 cores implemented this WFI, so this is slightly
431 * over-broad.
432 */
433 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
434 .access = PL1_W, .type = ARM_CP_WFI },
435 REGINFO_SENTINEL
436};
437
438static const ARMCPRegInfo not_v7_cp_reginfo[] = {
439 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
440 * is UNPREDICTABLE; we choose to NOP as most implementations do).
441 */
442 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
443 .access = PL1_W, .type = ARM_CP_WFI },
34f90529
PM
444 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
445 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
446 * OMAPCP will override this space.
447 */
448 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
449 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
450 .resetvalue = 0 },
451 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
452 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
453 .resetvalue = 0 },
776d4e5c
PM
454 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
455 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
d4e6df63
PM
456 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
457 .resetvalue = 0 },
7d57f408
PM
458 REGINFO_SENTINEL
459};
460
2771db27
PM
461static int cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
462{
463 if (env->cp15.c1_coproc != value) {
464 env->cp15.c1_coproc = value;
465 /* ??? Is this safe when called from within a TB? */
466 tb_flush(env);
467 }
468 return 0;
469}
470
7d57f408
PM
471static const ARMCPRegInfo v6_cp_reginfo[] = {
472 /* prefetch by MVA in v6, NOP in v7 */
473 { .name = "MVA_prefetch",
474 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
475 .access = PL1_W, .type = ARM_CP_NOP },
476 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
477 .access = PL0_W, .type = ARM_CP_NOP },
091fd17c 478 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
7d57f408 479 .access = PL0_W, .type = ARM_CP_NOP },
091fd17c 480 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
7d57f408 481 .access = PL0_W, .type = ARM_CP_NOP },
06d76f31
PM
482 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
483 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
484 .resetvalue = 0, },
485 /* Watchpoint Fault Address Register : should actually only be present
486 * for 1136, 1176, 11MPCore.
487 */
488 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
489 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
2771db27
PM
490 { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
491 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
492 .resetvalue = 0, .writefn = cpacr_write },
7d57f408
PM
493 REGINFO_SENTINEL
494};
495
d4e6df63 496
200ac0ef
PM
497static int pmreg_read(CPUARMState *env, const ARMCPRegInfo *ri,
498 uint64_t *value)
499{
500 /* Generic performance monitor register read function for where
501 * user access may be allowed by PMUSERENR.
502 */
503 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
504 return EXCP_UDEF;
505 }
506 *value = CPREG_FIELD32(env, ri);
507 return 0;
508}
509
510static int pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
511 uint64_t value)
512{
513 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
514 return EXCP_UDEF;
515 }
516 /* only the DP, X, D and E bits are writable */
517 env->cp15.c9_pmcr &= ~0x39;
518 env->cp15.c9_pmcr |= (value & 0x39);
519 return 0;
520}
521
522static int pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
523 uint64_t value)
524{
525 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
526 return EXCP_UDEF;
527 }
528 value &= (1 << 31);
529 env->cp15.c9_pmcnten |= value;
530 return 0;
531}
532
533static int pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
534 uint64_t value)
535{
536 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
537 return EXCP_UDEF;
538 }
539 value &= (1 << 31);
540 env->cp15.c9_pmcnten &= ~value;
541 return 0;
542}
543
544static int pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
545 uint64_t value)
546{
547 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
548 return EXCP_UDEF;
549 }
550 env->cp15.c9_pmovsr &= ~value;
551 return 0;
552}
553
554static int pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
555 uint64_t value)
556{
557 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
558 return EXCP_UDEF;
559 }
560 env->cp15.c9_pmxevtyper = value & 0xff;
561 return 0;
562}
563
564static int pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
565 uint64_t value)
566{
567 env->cp15.c9_pmuserenr = value & 1;
568 return 0;
569}
570
571static int pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
572 uint64_t value)
573{
574 /* We have no event counters so only the C bit can be changed */
575 value &= (1 << 31);
576 env->cp15.c9_pminten |= value;
577 return 0;
578}
579
580static int pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
581 uint64_t value)
582{
583 value &= (1 << 31);
584 env->cp15.c9_pminten &= ~value;
585 return 0;
586}
587
8641136c
NR
588static int vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
589 uint64_t value)
590{
591 env->cp15.c12_vbar = value & ~0x1Ful;
592 return 0;
593}
594
776d4e5c
PM
595static int ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
596 uint64_t *value)
597{
598 ARMCPU *cpu = arm_env_get_cpu(env);
599 *value = cpu->ccsidr[env->cp15.c0_cssel];
600 return 0;
601}
602
603static int csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
604 uint64_t value)
605{
606 env->cp15.c0_cssel = value & 0xf;
607 return 0;
608}
609
e9aa6c21
PM
610static const ARMCPRegInfo v7_cp_reginfo[] = {
611 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
612 * debug components
613 */
614 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
615 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
091fd17c 616 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
e9aa6c21 617 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7d57f408
PM
618 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
619 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
620 .access = PL1_W, .type = ARM_CP_NOP },
200ac0ef
PM
621 /* Performance monitors are implementation defined in v7,
622 * but with an ARM recommended set of registers, which we
623 * follow (although we don't actually implement any counters)
624 *
625 * Performance registers fall into three categories:
626 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
627 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
628 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
629 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
630 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
631 */
632 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
633 .access = PL0_RW, .resetvalue = 0,
634 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
d4e6df63
PM
635 .readfn = pmreg_read, .writefn = pmcntenset_write,
636 .raw_readfn = raw_read, .raw_writefn = raw_write },
200ac0ef
PM
637 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
638 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
d4e6df63
PM
639 .readfn = pmreg_read, .writefn = pmcntenclr_write,
640 .type = ARM_CP_NO_MIGRATE },
200ac0ef
PM
641 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
642 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
d4e6df63
PM
643 .readfn = pmreg_read, .writefn = pmovsr_write,
644 .raw_readfn = raw_read, .raw_writefn = raw_write },
200ac0ef
PM
645 /* Unimplemented so WI. Strictly speaking write accesses in PL0 should
646 * respect PMUSERENR.
647 */
648 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
649 .access = PL0_W, .type = ARM_CP_NOP },
650 /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
651 * We choose to RAZ/WI. XXX should respect PMUSERENR.
652 */
653 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
654 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
655 /* Unimplemented, RAZ/WI. XXX PMUSERENR */
656 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
657 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
658 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
659 .access = PL0_RW,
660 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
d4e6df63
PM
661 .readfn = pmreg_read, .writefn = pmxevtyper_write,
662 .raw_readfn = raw_read, .raw_writefn = raw_write },
200ac0ef
PM
663 /* Unimplemented, RAZ/WI. XXX PMUSERENR */
664 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
665 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
666 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
667 .access = PL0_R | PL1_RW,
668 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
669 .resetvalue = 0,
d4e6df63 670 .writefn = pmuserenr_write, .raw_writefn = raw_write },
200ac0ef
PM
671 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
672 .access = PL1_RW,
673 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
674 .resetvalue = 0,
d4e6df63 675 .writefn = pmintenset_write, .raw_writefn = raw_write },
200ac0ef 676 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
d4e6df63 677 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
200ac0ef 678 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
d4e6df63 679 .resetvalue = 0, .writefn = pmintenclr_write, },
8641136c
NR
680 { .name = "VBAR", .cp = 15, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
681 .access = PL1_RW, .writefn = vbar_write,
682 .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
683 .resetvalue = 0 },
2771db27
PM
684 { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
685 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
686 .resetvalue = 0, },
776d4e5c 687 { .name = "CCSIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
d4e6df63 688 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
776d4e5c
PM
689 { .name = "CSSELR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
690 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
691 .writefn = csselr_write, .resetvalue = 0 },
692 /* Auxiliary ID register: this actually has an IMPDEF value but for now
693 * just RAZ for all cores:
694 */
695 { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
696 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
e9aa6c21
PM
697 REGINFO_SENTINEL
698};
699
c326b979
PM
700static int teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
701{
702 value &= 1;
703 env->teecr = value;
704 return 0;
705}
706
707static int teehbr_read(CPUARMState *env, const ARMCPRegInfo *ri,
708 uint64_t *value)
709{
710 /* This is a helper function because the user access rights
711 * depend on the value of the TEECR.
712 */
713 if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
714 return EXCP_UDEF;
715 }
716 *value = env->teehbr;
717 return 0;
718}
719
720static int teehbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
721 uint64_t value)
722{
723 if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
724 return EXCP_UDEF;
725 }
726 env->teehbr = value;
727 return 0;
728}
729
730static const ARMCPRegInfo t2ee_cp_reginfo[] = {
731 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
732 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
733 .resetvalue = 0,
734 .writefn = teecr_write },
735 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
736 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
d4e6df63 737 .resetvalue = 0, .raw_readfn = raw_read, .raw_writefn = raw_write,
c326b979
PM
738 .readfn = teehbr_read, .writefn = teehbr_write },
739 REGINFO_SENTINEL
740};
741
4d31c596
PM
742static const ARMCPRegInfo v6k_cp_reginfo[] = {
743 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
744 .access = PL0_RW,
745 .fieldoffset = offsetof(CPUARMState, cp15.c13_tls1),
746 .resetvalue = 0 },
747 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
748 .access = PL0_R|PL1_W,
749 .fieldoffset = offsetof(CPUARMState, cp15.c13_tls2),
750 .resetvalue = 0 },
751 { .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4,
752 .access = PL1_RW,
753 .fieldoffset = offsetof(CPUARMState, cp15.c13_tls3),
754 .resetvalue = 0 },
755 REGINFO_SENTINEL
756};
757
55d284af
PM
758#ifndef CONFIG_USER_ONLY
759
760static uint64_t gt_get_countervalue(CPUARMState *env)
761{
bc72ad67 762 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
55d284af
PM
763}
764
765static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
766{
767 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
768
769 if (gt->ctl & 1) {
770 /* Timer enabled: calculate and set current ISTATUS, irq, and
771 * reset timer to when ISTATUS next has to change
772 */
773 uint64_t count = gt_get_countervalue(&cpu->env);
774 /* Note that this must be unsigned 64 bit arithmetic: */
775 int istatus = count >= gt->cval;
776 uint64_t nexttick;
777
778 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
779 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
780 (istatus && !(gt->ctl & 2)));
781 if (istatus) {
782 /* Next transition is when count rolls back over to zero */
783 nexttick = UINT64_MAX;
784 } else {
785 /* Next transition is when we hit cval */
786 nexttick = gt->cval;
787 }
788 /* Note that the desired next expiry time might be beyond the
789 * signed-64-bit range of a QEMUTimer -- in this case we just
790 * set the timer for as far in the future as possible. When the
791 * timer expires we will reset the timer for any remaining period.
792 */
793 if (nexttick > INT64_MAX / GTIMER_SCALE) {
794 nexttick = INT64_MAX / GTIMER_SCALE;
795 }
bc72ad67 796 timer_mod(cpu->gt_timer[timeridx], nexttick);
55d284af
PM
797 } else {
798 /* Timer disabled: ISTATUS and timer output always clear */
799 gt->ctl &= ~4;
800 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
bc72ad67 801 timer_del(cpu->gt_timer[timeridx]);
55d284af
PM
802 }
803}
804
805static int gt_cntfrq_read(CPUARMState *env, const ARMCPRegInfo *ri,
806 uint64_t *value)
807{
808 /* Not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
809 if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
810 return EXCP_UDEF;
811 }
812 *value = env->cp15.c14_cntfrq;
813 return 0;
814}
815
816static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
817{
818 ARMCPU *cpu = arm_env_get_cpu(env);
819 int timeridx = ri->opc1 & 1;
820
bc72ad67 821 timer_del(cpu->gt_timer[timeridx]);
55d284af
PM
822}
823
824static int gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri,
825 uint64_t *value)
826{
827 int timeridx = ri->opc1 & 1;
828
829 if (arm_current_pl(env) == 0 &&
830 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
831 return EXCP_UDEF;
832 }
833 *value = gt_get_countervalue(env);
834 return 0;
835}
836
837static int gt_cval_read(CPUARMState *env, const ARMCPRegInfo *ri,
838 uint64_t *value)
839{
840 int timeridx = ri->opc1 & 1;
841
842 if (arm_current_pl(env) == 0 &&
843 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
844 return EXCP_UDEF;
845 }
846 *value = env->cp15.c14_timer[timeridx].cval;
847 return 0;
848}
849
850static int gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
851 uint64_t value)
852{
853 int timeridx = ri->opc1 & 1;
854
855 env->cp15.c14_timer[timeridx].cval = value;
856 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
857 return 0;
858}
859static int gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
860 uint64_t *value)
861{
862 int timeridx = ri->crm & 1;
863
864 if (arm_current_pl(env) == 0 &&
865 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
866 return EXCP_UDEF;
867 }
868 *value = (uint32_t)(env->cp15.c14_timer[timeridx].cval -
869 gt_get_countervalue(env));
870 return 0;
871}
872
873static int gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
874 uint64_t value)
875{
876 int timeridx = ri->crm & 1;
877
878 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
879 + sextract64(value, 0, 32);
880 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
881 return 0;
882}
883
884static int gt_ctl_read(CPUARMState *env, const ARMCPRegInfo *ri,
885 uint64_t *value)
886{
887 int timeridx = ri->crm & 1;
888
889 if (arm_current_pl(env) == 0 &&
890 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
891 return EXCP_UDEF;
892 }
893 *value = env->cp15.c14_timer[timeridx].ctl;
894 return 0;
895}
896
897static int gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
898 uint64_t value)
899{
900 ARMCPU *cpu = arm_env_get_cpu(env);
901 int timeridx = ri->crm & 1;
902 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
903
904 env->cp15.c14_timer[timeridx].ctl = value & 3;
905 if ((oldval ^ value) & 1) {
906 /* Enable toggled */
907 gt_recalc_timer(cpu, timeridx);
908 } else if ((oldval & value) & 2) {
909 /* IMASK toggled: don't need to recalculate,
910 * just set the interrupt line based on ISTATUS
911 */
912 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
913 (oldval & 4) && (value & 2));
914 }
915 return 0;
916}
917
918void arm_gt_ptimer_cb(void *opaque)
919{
920 ARMCPU *cpu = opaque;
921
922 gt_recalc_timer(cpu, GTIMER_PHYS);
923}
924
925void arm_gt_vtimer_cb(void *opaque)
926{
927 ARMCPU *cpu = opaque;
928
929 gt_recalc_timer(cpu, GTIMER_VIRT);
930}
931
932static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
933 /* Note that CNTFRQ is purely reads-as-written for the benefit
934 * of software; writing it doesn't actually change the timer frequency.
935 * Our reset value matches the fixed frequency we implement the timer at.
936 */
937 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
938 .access = PL1_RW | PL0_R,
939 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
940 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
941 .readfn = gt_cntfrq_read, .raw_readfn = raw_read,
942 },
943 /* overall control: mostly access permissions */
944 { .name = "CNTKCTL", .cp = 15, .crn = 14, .crm = 1, .opc1 = 0, .opc2 = 0,
945 .access = PL1_RW,
946 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
947 .resetvalue = 0,
948 },
949 /* per-timer control */
950 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
951 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
952 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
953 .resetvalue = 0,
954 .readfn = gt_ctl_read, .writefn = gt_ctl_write,
955 .raw_readfn = raw_read, .raw_writefn = raw_write,
956 },
957 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
958 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
959 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
960 .resetvalue = 0,
961 .readfn = gt_ctl_read, .writefn = gt_ctl_write,
962 .raw_readfn = raw_read, .raw_writefn = raw_write,
963 },
964 /* TimerValue views: a 32 bit downcounting view of the underlying state */
965 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
966 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
967 .readfn = gt_tval_read, .writefn = gt_tval_write,
968 },
969 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
970 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
971 .readfn = gt_tval_read, .writefn = gt_tval_write,
972 },
973 /* The counter itself */
974 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
975 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
976 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
977 },
978 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
979 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
980 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
981 },
982 /* Comparison value, indicating when the timer goes off */
983 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
984 .access = PL1_RW | PL0_R,
985 .type = ARM_CP_64BIT | ARM_CP_IO,
986 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
987 .resetvalue = 0,
988 .readfn = gt_cval_read, .writefn = gt_cval_write,
989 .raw_readfn = raw_read, .raw_writefn = raw_write,
990 },
991 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
992 .access = PL1_RW | PL0_R,
993 .type = ARM_CP_64BIT | ARM_CP_IO,
994 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
995 .resetvalue = 0,
996 .readfn = gt_cval_read, .writefn = gt_cval_write,
997 .raw_readfn = raw_read, .raw_writefn = raw_write,
998 },
999 REGINFO_SENTINEL
1000};
1001
1002#else
1003/* In user-mode none of the generic timer registers are accessible,
bc72ad67 1004 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
55d284af
PM
1005 * so instead just don't register any of them.
1006 */
6cc7a3ae 1007static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
6cc7a3ae
PM
1008 REGINFO_SENTINEL
1009};
1010
55d284af
PM
1011#endif
1012
4a501606
PM
1013static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1014{
891a2fe7
PM
1015 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1016 env->cp15.c7_par = value;
1017 } else if (arm_feature(env, ARM_FEATURE_V7)) {
4a501606
PM
1018 env->cp15.c7_par = value & 0xfffff6ff;
1019 } else {
1020 env->cp15.c7_par = value & 0xfffff1ff;
1021 }
1022 return 0;
1023}
1024
1025#ifndef CONFIG_USER_ONLY
1026/* get_phys_addr() isn't present for user-mode-only targets */
702a9357
PM
1027
1028/* Return true if extended addresses are enabled, ie this is an
1029 * LPAE implementation and we are using the long-descriptor translation
1030 * table format because the TTBCR EAE bit is set.
1031 */
1032static inline bool extended_addresses_enabled(CPUARMState *env)
1033{
1034 return arm_feature(env, ARM_FEATURE_LPAE)
78dbbbe4 1035 && (env->cp15.c2_control & (1U << 31));
702a9357
PM
1036}
1037
4a501606
PM
1038static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1039{
a8170e5e 1040 hwaddr phys_addr;
4a501606
PM
1041 target_ulong page_size;
1042 int prot;
1043 int ret, is_user = ri->opc2 & 2;
1044 int access_type = ri->opc2 & 1;
1045
1046 if (ri->opc2 & 4) {
1047 /* Other states are only available with TrustZone */
1048 return EXCP_UDEF;
1049 }
1050 ret = get_phys_addr(env, value, access_type, is_user,
1051 &phys_addr, &prot, &page_size);
702a9357
PM
1052 if (extended_addresses_enabled(env)) {
1053 /* ret is a DFSR/IFSR value for the long descriptor
1054 * translation table format, but with WnR always clear.
1055 * Convert it to a 64-bit PAR.
1056 */
1057 uint64_t par64 = (1 << 11); /* LPAE bit always set */
1058 if (ret == 0) {
1059 par64 |= phys_addr & ~0xfffULL;
1060 /* We don't set the ATTR or SH fields in the PAR. */
4a501606 1061 } else {
702a9357
PM
1062 par64 |= 1; /* F */
1063 par64 |= (ret & 0x3f) << 1; /* FS */
1064 /* Note that S2WLK and FSTAGE are always zero, because we don't
1065 * implement virtualization and therefore there can't be a stage 2
1066 * fault.
1067 */
4a501606 1068 }
702a9357
PM
1069 env->cp15.c7_par = par64;
1070 env->cp15.c7_par_hi = par64 >> 32;
4a501606 1071 } else {
702a9357
PM
1072 /* ret is a DFSR/IFSR value for the short descriptor
1073 * translation table format (with WnR always clear).
1074 * Convert it to a 32-bit PAR.
1075 */
1076 if (ret == 0) {
1077 /* We do not set any attribute bits in the PAR */
1078 if (page_size == (1 << 24)
1079 && arm_feature(env, ARM_FEATURE_V7)) {
1080 env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1081 } else {
1082 env->cp15.c7_par = phys_addr & 0xfffff000;
1083 }
1084 } else {
1085 env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1086 ((ret & (12 << 1)) >> 6) |
1087 ((ret & 0xf) << 1) | 1;
1088 }
1089 env->cp15.c7_par_hi = 0;
4a501606
PM
1090 }
1091 return 0;
1092}
1093#endif
1094
1095static const ARMCPRegInfo vapa_cp_reginfo[] = {
1096 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
1097 .access = PL1_RW, .resetvalue = 0,
1098 .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
1099 .writefn = par_write },
1100#ifndef CONFIG_USER_ONLY
1101 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
d4e6df63 1102 .access = PL1_W, .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
4a501606
PM
1103#endif
1104 REGINFO_SENTINEL
1105};
1106
18032bec
PM
/* Return basic MPU access permission bits: compress the 2-bit AP fields,
 * which sit on 4-bit boundaries in the extended format, down to a packed
 * 2-bits-per-region value.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t packed = 0;
    uint32_t field_mask = 3;
    int shift;

    for (shift = 0; shift < 16; shift += 2) {
        packed |= (val >> shift) & field_mask;
        field_mask <<= 2;
    }
    return packed;
}
1121
/* Pad basic MPU access permission bits to extended format: spread the
 * packed 2-bit AP fields out onto 4-bit boundaries.
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t spread = 0;
    uint32_t field_mask = 3;
    int shift;

    for (shift = 0; shift < 16; shift += 2) {
        spread |= (val & field_mask) << shift;
        field_mask <<= 2;
    }
    return spread;
}
1136
1137static int pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1138 uint64_t value)
1139{
1140 env->cp15.c5_data = extended_mpu_ap_bits(value);
1141 return 0;
1142}
1143
1144static int pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
1145 uint64_t *value)
1146{
1147 *value = simple_mpu_ap_bits(env->cp15.c5_data);
1148 return 0;
1149}
1150
1151static int pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1152 uint64_t value)
1153{
1154 env->cp15.c5_insn = extended_mpu_ap_bits(value);
1155 return 0;
1156}
1157
1158static int pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
1159 uint64_t *value)
1160{
1161 *value = simple_mpu_ap_bits(env->cp15.c5_insn);
1162 return 0;
1163}
1164
06d76f31
PM
1165static int arm946_prbs_read(CPUARMState *env, const ARMCPRegInfo *ri,
1166 uint64_t *value)
1167{
599d64f6 1168 if (ri->crm >= 8) {
06d76f31
PM
1169 return EXCP_UDEF;
1170 }
1171 *value = env->cp15.c6_region[ri->crm];
1172 return 0;
1173}
1174
1175static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
1176 uint64_t value)
1177{
599d64f6 1178 if (ri->crm >= 8) {
06d76f31
PM
1179 return EXCP_UDEF;
1180 }
1181 env->cp15.c6_region[ri->crm] = value;
1182 return 0;
1183}
1184
18032bec
PM
1185static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
1186 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
d4e6df63 1187 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
18032bec
PM
1188 .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
1189 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
1190 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
d4e6df63 1191 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
18032bec
PM
1192 .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
1193 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
1194 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
1195 .access = PL1_RW,
1196 .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
1197 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
1198 .access = PL1_RW,
1199 .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
ecce5c3c
PM
1200 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
1201 .access = PL1_RW,
1202 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
1203 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
1204 .access = PL1_RW,
1205 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
06d76f31
PM
1206 /* Protection region base and size registers */
1207 { .name = "946_PRBS", .cp = 15, .crn = 6, .crm = CP_ANY, .opc1 = 0,
1208 .opc2 = CP_ANY, .access = PL1_RW,
1209 .readfn = arm946_prbs_read, .writefn = arm946_prbs_write, },
18032bec
PM
1210 REGINFO_SENTINEL
1211};
1212
d4e6df63
PM
1213static int vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
1214 uint64_t value)
ecce5c3c 1215{
2ebcebe2
PM
1216 int maskshift = extract32(value, 0, 3);
1217
74f1c6dd 1218 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
e42c4db3 1219 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
e42c4db3
PM
1220 } else {
1221 value &= 7;
1222 }
1223 /* Note that we always calculate c2_mask and c2_base_mask, but
1224 * they are only used for short-descriptor tables (ie if EAE is 0);
1225 * for long-descriptor tables the TTBCR fields are used differently
1226 * and the c2_mask and c2_base_mask values are meaningless.
1227 */
ecce5c3c 1228 env->cp15.c2_control = value;
2ebcebe2
PM
1229 env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
1230 env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
ecce5c3c
PM
1231 return 0;
1232}
1233
d4e6df63
PM
1234static int vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1235 uint64_t value)
1236{
1237 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1238 /* With LPAE the TTBCR could result in a change of ASID
1239 * via the TTBCR.A1 bit, so do a TLB flush.
1240 */
1241 tlb_flush(env, 1);
1242 }
1243 return vmsa_ttbcr_raw_write(env, ri, value);
1244}
1245
ecce5c3c
PM
1246static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1247{
1248 env->cp15.c2_base_mask = 0xffffc000u;
1249 env->cp15.c2_control = 0;
1250 env->cp15.c2_mask = 0;
1251}
1252
18032bec
PM
1253static const ARMCPRegInfo vmsa_cp_reginfo[] = {
1254 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
1255 .access = PL1_RW,
1256 .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
1257 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
1258 .access = PL1_RW,
1259 .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
ecce5c3c
PM
1260 { .name = "TTBR0", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
1261 .access = PL1_RW,
1262 .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
1263 { .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
1264 .access = PL1_RW,
81a60ada 1265 .fieldoffset = offsetof(CPUARMState, cp15.c2_base1), .resetvalue = 0, },
ecce5c3c
PM
1266 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
1267 .access = PL1_RW, .writefn = vmsa_ttbcr_write,
d4e6df63 1268 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
ecce5c3c 1269 .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
06d76f31
PM
1270 { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
1271 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
1272 .resetvalue = 0, },
18032bec
PM
1273 REGINFO_SENTINEL
1274};
1275
1047b9d7
PM
1276static int omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
1277 uint64_t value)
1278{
1279 env->cp15.c15_ticonfig = value & 0xe7;
1280 /* The OS_TYPE bit in this register changes the reported CPUID! */
1281 env->cp15.c0_cpuid = (value & (1 << 5)) ?
1282 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1283 return 0;
1284}
1285
1286static int omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1287 uint64_t value)
1288{
1289 env->cp15.c15_threadid = value & 0xffff;
1290 return 0;
1291}
1292
1293static int omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
1294 uint64_t value)
1295{
1296 /* Wait-for-interrupt (deprecated) */
c3affe56 1297 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
1047b9d7
PM
1298 return 0;
1299}
1300
c4804214
PM
1301static int omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
1302 uint64_t value)
1303{
1304 /* On OMAP there are registers indicating the max/min index of dcache lines
1305 * containing a dirty line; cache flush operations have to reset these.
1306 */
1307 env->cp15.c15_i_max = 0x000;
1308 env->cp15.c15_i_min = 0xff0;
1309 return 0;
1310}
1311
18032bec
PM
1312static const ARMCPRegInfo omap_cp_reginfo[] = {
1313 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
1314 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
1315 .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
1047b9d7
PM
1316 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
1317 .access = PL1_RW, .type = ARM_CP_NOP },
1318 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
1319 .access = PL1_RW,
1320 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
1321 .writefn = omap_ticonfig_write },
1322 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
1323 .access = PL1_RW,
1324 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
1325 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
1326 .access = PL1_RW, .resetvalue = 0xff0,
1327 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
1328 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
1329 .access = PL1_RW,
1330 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
1331 .writefn = omap_threadid_write },
1332 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
1333 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
d4e6df63 1334 .type = ARM_CP_NO_MIGRATE,
1047b9d7
PM
1335 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
1336 /* TODO: Peripheral port remap register:
1337 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
1338 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
1339 * when MMU is off.
1340 */
c4804214 1341 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
d4e6df63
PM
1342 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
1343 .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
c4804214 1344 .writefn = omap_cachemaint_write },
34f90529
PM
1345 { .name = "C9", .cp = 15, .crn = 9,
1346 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
1347 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
1047b9d7
PM
1348 REGINFO_SENTINEL
1349};
1350
1351static int xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1352 uint64_t value)
1353{
1354 value &= 0x3fff;
1355 if (env->cp15.c15_cpar != value) {
1356 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
1357 tb_flush(env);
1358 env->cp15.c15_cpar = value;
1359 }
1360 return 0;
1361}
1362
1363static const ARMCPRegInfo xscale_cp_reginfo[] = {
1364 { .name = "XSCALE_CPAR",
1365 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
1366 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
1367 .writefn = xscale_cpar_write, },
2771db27
PM
1368 { .name = "XSCALE_AUXCR",
1369 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
1370 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
1371 .resetvalue = 0, },
1047b9d7
PM
1372 REGINFO_SENTINEL
1373};
1374
1375static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
1376 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
1377 * implementation of this implementation-defined space.
1378 * Ideally this should eventually disappear in favour of actually
1379 * implementing the correct behaviour for all cores.
1380 */
1381 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
1382 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3671cd87
PC
1383 .access = PL1_RW,
1384 .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
d4e6df63 1385 .resetvalue = 0 },
18032bec
PM
1386 REGINFO_SENTINEL
1387};
1388
c4804214
PM
1389static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
1390 /* Cache status: RAZ because we have no cache so it's always clean */
1391 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
d4e6df63
PM
1392 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1393 .resetvalue = 0 },
c4804214
PM
1394 REGINFO_SENTINEL
1395};
1396
1397static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
1398 /* We never have a a block transfer operation in progress */
1399 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
d4e6df63
PM
1400 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1401 .resetvalue = 0 },
30b05bba
PM
1402 /* The cache ops themselves: these all NOP for QEMU */
1403 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
1404 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1405 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
1406 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1407 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
1408 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1409 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
1410 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1411 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
1412 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1413 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
1414 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
c4804214
PM
1415 REGINFO_SENTINEL
1416};
1417
1418static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
1419 /* The cache test-and-clean instructions always return (1 << 30)
1420 * to indicate that there are no dirty cache lines.
1421 */
1422 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
d4e6df63
PM
1423 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1424 .resetvalue = (1 << 30) },
c4804214 1425 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
d4e6df63
PM
1426 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1427 .resetvalue = (1 << 30) },
c4804214
PM
1428 REGINFO_SENTINEL
1429};
1430
34f90529
PM
1431static const ARMCPRegInfo strongarm_cp_reginfo[] = {
1432 /* Ignore ReadBuffer accesses */
1433 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
1434 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
d4e6df63
PM
1435 .access = PL1_RW, .resetvalue = 0,
1436 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
34f90529
PM
1437 REGINFO_SENTINEL
1438};
1439
81bdde9d
PM
1440static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1441 uint64_t *value)
1442{
55e5c285
AF
1443 CPUState *cs = CPU(arm_env_get_cpu(env));
1444 uint32_t mpidr = cs->cpu_index;
81bdde9d
PM
1445 /* We don't support setting cluster ID ([8..11])
1446 * so these bits always RAZ.
1447 */
1448 if (arm_feature(env, ARM_FEATURE_V7MP)) {
78dbbbe4 1449 mpidr |= (1U << 31);
81bdde9d
PM
1450 /* Cores which are uniprocessor (non-coherent)
1451 * but still implement the MP extensions set
1452 * bit 30. (For instance, A9UP.) However we do
1453 * not currently model any of those cores.
1454 */
1455 }
1456 *value = mpidr;
1457 return 0;
1458}
1459
1460static const ARMCPRegInfo mpidr_cp_reginfo[] = {
1461 { .name = "MPIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
d4e6df63 1462 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
81bdde9d
PM
1463 REGINFO_SENTINEL
1464};
1465
891a2fe7
PM
1466static int par64_read(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
1467{
1468 *value = ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
1469 return 0;
1470}
1471
1472static int par64_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1473{
1474 env->cp15.c7_par_hi = value >> 32;
1475 env->cp15.c7_par = value;
1476 return 0;
1477}
1478
1479static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1480{
1481 env->cp15.c7_par_hi = 0;
1482 env->cp15.c7_par = 0;
1483}
1484
1485static int ttbr064_read(CPUARMState *env, const ARMCPRegInfo *ri,
1486 uint64_t *value)
1487{
1488 *value = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
1489 return 0;
1490}
1491
d4e6df63
PM
1492static int ttbr064_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
1493 uint64_t value)
891a2fe7
PM
1494{
1495 env->cp15.c2_base0_hi = value >> 32;
1496 env->cp15.c2_base0 = value;
d4e6df63
PM
1497 return 0;
1498}
1499
1500static int ttbr064_write(CPUARMState *env, const ARMCPRegInfo *ri,
1501 uint64_t value)
1502{
891a2fe7
PM
1503 /* Writes to the 64 bit format TTBRs may change the ASID */
1504 tlb_flush(env, 1);
d4e6df63 1505 return ttbr064_raw_write(env, ri, value);
891a2fe7
PM
1506}
1507
1508static void ttbr064_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1509{
1510 env->cp15.c2_base0_hi = 0;
1511 env->cp15.c2_base0 = 0;
1512}
1513
1514static int ttbr164_read(CPUARMState *env, const ARMCPRegInfo *ri,
1515 uint64_t *value)
1516{
1517 *value = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
1518 return 0;
1519}
1520
1521static int ttbr164_write(CPUARMState *env, const ARMCPRegInfo *ri,
1522 uint64_t value)
1523{
1524 env->cp15.c2_base1_hi = value >> 32;
1525 env->cp15.c2_base1 = value;
1526 return 0;
1527}
1528
1529static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1530{
1531 env->cp15.c2_base1_hi = 0;
1532 env->cp15.c2_base1 = 0;
1533}
1534
7ac681cf 1535static const ARMCPRegInfo lpae_cp_reginfo[] = {
b90372ad 1536 /* NOP AMAIR0/1: the override is because these clash with the rather
7ac681cf
PM
1537 * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
1538 */
1539 { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
1540 .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
1541 .resetvalue = 0 },
1542 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
1543 .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
1544 .resetvalue = 0 },
f9fc619a
PM
1545 /* 64 bit access versions of the (dummy) debug registers */
1546 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
1547 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
1548 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
1549 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
891a2fe7
PM
1550 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
1551 .access = PL1_RW, .type = ARM_CP_64BIT,
1552 .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
1553 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
1554 .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr064_read,
d4e6df63
PM
1555 .writefn = ttbr064_write, .raw_writefn = ttbr064_raw_write,
1556 .resetfn = ttbr064_reset },
891a2fe7
PM
1557 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
1558 .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr164_read,
1559 .writefn = ttbr164_write, .resetfn = ttbr164_reset },
7ac681cf
PM
1560 REGINFO_SENTINEL
1561};
1562
2771db27
PM
1563static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1564{
1565 env->cp15.c1_sys = value;
1566 /* ??? Lots of these bits are not implemented. */
1567 /* This may enable/disable the MMU, so do a TLB flush. */
1568 tlb_flush(env, 1);
1569 return 0;
1570}
1571
2ceb98c0
PM
1572void register_cp_regs_for_features(ARMCPU *cpu)
1573{
1574 /* Register all the coprocessor registers based on feature bits */
1575 CPUARMState *env = &cpu->env;
1576 if (arm_feature(env, ARM_FEATURE_M)) {
1577 /* M profile has no coprocessor registers */
1578 return;
1579 }
1580
e9aa6c21 1581 define_arm_cp_regs(cpu, cp_reginfo);
7d57f408 1582 if (arm_feature(env, ARM_FEATURE_V6)) {
8515a092
PM
1583 /* The ID registers all have impdef reset values */
1584 ARMCPRegInfo v6_idregs[] = {
1585 { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
1586 .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
1587 .resetvalue = cpu->id_pfr0 },
1588 { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
1589 .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
1590 .resetvalue = cpu->id_pfr1 },
1591 { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
1592 .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
1593 .resetvalue = cpu->id_dfr0 },
1594 { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
1595 .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
1596 .resetvalue = cpu->id_afr0 },
1597 { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
1598 .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
1599 .resetvalue = cpu->id_mmfr0 },
1600 { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
1601 .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
1602 .resetvalue = cpu->id_mmfr1 },
1603 { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
1604 .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
1605 .resetvalue = cpu->id_mmfr2 },
1606 { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
1607 .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
1608 .resetvalue = cpu->id_mmfr3 },
1609 { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
1610 .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
1611 .resetvalue = cpu->id_isar0 },
1612 { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
1613 .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
1614 .resetvalue = cpu->id_isar1 },
1615 { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
1616 .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
1617 .resetvalue = cpu->id_isar2 },
1618 { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
1619 .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
1620 .resetvalue = cpu->id_isar3 },
1621 { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
1622 .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
1623 .resetvalue = cpu->id_isar4 },
1624 { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
1625 .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
1626 .resetvalue = cpu->id_isar5 },
1627 /* 6..7 are as yet unallocated and must RAZ */
1628 { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
1629 .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
1630 .resetvalue = 0 },
1631 { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
1632 .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
1633 .resetvalue = 0 },
1634 REGINFO_SENTINEL
1635 };
1636 define_arm_cp_regs(cpu, v6_idregs);
7d57f408
PM
1637 define_arm_cp_regs(cpu, v6_cp_reginfo);
1638 } else {
1639 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
1640 }
4d31c596
PM
1641 if (arm_feature(env, ARM_FEATURE_V6K)) {
1642 define_arm_cp_regs(cpu, v6k_cp_reginfo);
1643 }
e9aa6c21 1644 if (arm_feature(env, ARM_FEATURE_V7)) {
200ac0ef
PM
1645 /* v7 performance monitor control register: same implementor
1646 * field as main ID register, and we implement no event counters.
1647 */
1648 ARMCPRegInfo pmcr = {
1649 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
1650 .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
1651 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
d4e6df63
PM
1652 .readfn = pmreg_read, .writefn = pmcr_write,
1653 .raw_readfn = raw_read, .raw_writefn = raw_write,
200ac0ef 1654 };
776d4e5c
PM
1655 ARMCPRegInfo clidr = {
1656 .name = "CLIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
1657 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
1658 };
200ac0ef 1659 define_one_arm_cp_reg(cpu, &pmcr);
776d4e5c 1660 define_one_arm_cp_reg(cpu, &clidr);
e9aa6c21 1661 define_arm_cp_regs(cpu, v7_cp_reginfo);
7d57f408
PM
1662 } else {
1663 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
e9aa6c21 1664 }
18032bec
PM
1665 if (arm_feature(env, ARM_FEATURE_MPU)) {
1666 /* These are the MPU registers prior to PMSAv6. Any new
1667 * PMSA core later than the ARM946 will require that we
1668 * implement the PMSAv6 or PMSAv7 registers, which are
1669 * completely different.
1670 */
1671 assert(!arm_feature(env, ARM_FEATURE_V6));
1672 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
1673 } else {
1674 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
1675 }
c326b979
PM
1676 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
1677 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
1678 }
6cc7a3ae
PM
1679 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
1680 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
1681 }
4a501606
PM
1682 if (arm_feature(env, ARM_FEATURE_VAPA)) {
1683 define_arm_cp_regs(cpu, vapa_cp_reginfo);
1684 }
c4804214
PM
1685 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
1686 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
1687 }
1688 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
1689 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
1690 }
1691 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
1692 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
1693 }
18032bec
PM
1694 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1695 define_arm_cp_regs(cpu, omap_cp_reginfo);
1696 }
34f90529
PM
1697 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
1698 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
1699 }
1047b9d7
PM
1700 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1701 define_arm_cp_regs(cpu, xscale_cp_reginfo);
1702 }
1703 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
1704 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
1705 }
7ac681cf
PM
1706 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1707 define_arm_cp_regs(cpu, lpae_cp_reginfo);
1708 }
7884849c
PM
1709 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
1710 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
1711 * be read-only (ie write causes UNDEF exception).
1712 */
1713 {
1714 ARMCPRegInfo id_cp_reginfo[] = {
1715 /* Note that the MIDR isn't a simple constant register because
1716 * of the TI925 behaviour where writes to another register can
1717 * cause the MIDR value to change.
97ce8d61
PC
1718 *
1719 * Unimplemented registers in the c15 0 0 0 space default to
1720 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
1721 * and friends override accordingly.
7884849c
PM
1722 */
1723 { .name = "MIDR",
97ce8d61 1724 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7884849c 1725 .access = PL1_R, .resetvalue = cpu->midr,
d4e6df63 1726 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
97ce8d61
PC
1727 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
1728 .type = ARM_CP_OVERRIDE },
7884849c
PM
1729 { .name = "CTR",
1730 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
1731 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
1732 { .name = "TCMTR",
1733 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
1734 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1735 { .name = "TLBTR",
1736 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
1737 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1738 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
1739 { .name = "DUMMY",
1740 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
1741 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1742 { .name = "DUMMY",
1743 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
1744 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1745 { .name = "DUMMY",
1746 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
1747 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1748 { .name = "DUMMY",
1749 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
1750 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1751 { .name = "DUMMY",
1752 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
1753 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1754 REGINFO_SENTINEL
1755 };
1756 ARMCPRegInfo crn0_wi_reginfo = {
1757 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
1758 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
1759 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
1760 };
1761 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
1762 arm_feature(env, ARM_FEATURE_STRONGARM)) {
1763 ARMCPRegInfo *r;
1764 /* Register the blanket "writes ignored" value first to cover the
a703eda1
PC
1765 * whole space. Then update the specific ID registers to allow write
1766 * access, so that they ignore writes rather than causing them to
1767 * UNDEF.
7884849c
PM
1768 */
1769 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
1770 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
1771 r->access = PL1_RW;
7884849c 1772 }
7884849c 1773 }
a703eda1 1774 define_arm_cp_regs(cpu, id_cp_reginfo);
7884849c
PM
1775 }
1776
97ce8d61
PC
1777 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
1778 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
1779 }
1780
2771db27
PM
1781 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
1782 ARMCPRegInfo auxcr = {
1783 .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
1784 .access = PL1_RW, .type = ARM_CP_CONST,
1785 .resetvalue = cpu->reset_auxcr
1786 };
1787 define_one_arm_cp_reg(cpu, &auxcr);
1788 }
1789
d8ba780b
PC
1790 if (arm_feature(env, ARM_FEATURE_CBAR)) {
1791 ARMCPRegInfo cbar = {
1792 .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
1793 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
1794 .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
1795 };
1796 define_one_arm_cp_reg(cpu, &cbar);
1797 }
1798
2771db27
PM
1799 /* Generic registers whose values depend on the implementation */
1800 {
1801 ARMCPRegInfo sctlr = {
1802 .name = "SCTLR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
1803 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
d4e6df63
PM
1804 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
1805 .raw_writefn = raw_write,
2771db27
PM
1806 };
1807 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1808 /* Normally we would always end the TB on an SCTLR write, but Linux
1809 * arch/arm/mach-pxa/sleep.S expects two instructions following
1810 * an MMU enable to execute from cache. Imitate this behaviour.
1811 */
1812 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
1813 }
1814 define_one_arm_cp_reg(cpu, &sctlr);
1815 }
2ceb98c0
PM
1816}
1817
/* Look up and instantiate the ARM CPU object named by @cpu_model.
 * Returns NULL if no CPU class with that name is registered; otherwise
 * returns a realized ARMCPU.  The caller owns the returned object.
 */
ARMCPU *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    ObjectClass *oc;

    oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    if (!oc) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(object_class_get_name(oc)));

    /* TODO this should be set centrally, once possible */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
1834
/* Register the feature-dependent coprocessor register sets with the
 * gdbstub.  The branches are mutually exclusive and ordered from most
 * to least capable FP/SIMD feature; the register counts (34/51/35/19)
 * must match the corresponding XML descriptions.
 */
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
1855
777dc784
PM
1856/* Sort alphabetically by type name, except for "any". */
1857static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5adb4839 1858{
777dc784
PM
1859 ObjectClass *class_a = (ObjectClass *)a;
1860 ObjectClass *class_b = (ObjectClass *)b;
1861 const char *name_a, *name_b;
5adb4839 1862
777dc784
PM
1863 name_a = object_class_get_name(class_a);
1864 name_b = object_class_get_name(class_b);
51492fd1 1865 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
777dc784 1866 return 1;
51492fd1 1867 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
777dc784
PM
1868 return -1;
1869 } else {
1870 return strcmp(name_a, name_b);
5adb4839
PB
1871 }
1872}
1873
/* GFunc callback for arm_cpu_list(): print one CPU model name.
 * Strips the "-arm-cpu" type suffix so the user sees just the model name.
 */
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, " %s\n",
                      name);
    g_free(name);
}
1887
/* Print the list of available CPU models to @f, sorted by
 * arm_cpu_list_compare ("any" last).
 */
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, " host (only available in KVM mode)\n");
#endif
}
1908
78027bb6
CR
/* GFunc callback for arch_query_cpu_definitions(): prepend one
 * CpuDefinitionInfo entry (model name without the "-arm-cpu" suffix)
 * to the list pointed to by @user_data.  The list takes ownership of
 * the allocated info and entry.
 */
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}
1927
/* QMP query-cpu-definitions backend: return the list of registered ARM
 * CPU models.  Caller owns (and must free) the returned list.
 * Note @errp is unused; this query cannot fail.
 */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
1939
6e6efd61
PM
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.  @crm/@opc1/@opc2
     * are the concrete (already expanded) values for this entry; @r may
     * still contain CP_ANY wildcards.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
    if (opaque) {
        r2->opaque = opaque;
    }
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * NO_MIGRATE so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable.
     */
    if ((r->type & ARM_CP_SPECIAL) ||
        ((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_NO_MIGRATE;
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    /* The hash table takes ownership of both the key and the copied
     * reginfo (presumably freed via destroy notifiers set at table
     * creation -- not visible here).
     */
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
1989
1990
4b6a83fb
PM
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     */
    int crm, opc1, opc2;
    /* Wildcards (CP_ANY) expand to the full legal range of each field;
     * concrete values collapse the loop to a single iteration.
     */
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                add_cpreg_to_hashtable(cpu, r, opaque, crm, opc1, opc2);
            }
        }
    }
}
2036
2037void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
2038 const ARMCPRegInfo *regs, void *opaque)
2039{
2040 /* Define a whole list of registers */
2041 const ARMCPRegInfo *r;
2042 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
2043 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
2044 }
2045}
2046
/* Look up the reginfo for an encoded coprocessor register (see
 * ENCODE_CP_REG); returns NULL if the register is not defined.
 */
const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
}
2051
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers.
     * Discards @value and returns 0 (success).
     */
    return 0;
}
2058
int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
{
    /* Helper coprocessor read function for read-as-zero registers.
     * (Comment previously said "write function" -- copy-paste slip.)
     */
    *value = 0;
    return 0;
}
2065
0ecb72a5 2066static int bad_mode_switch(CPUARMState *env, int mode)
37064a8b
PM
2067{
2068 /* Return true if it is not valid for us to switch to
2069 * this CPU mode (ie all the UNPREDICTABLE cases in
2070 * the ARM ARM CPSRWriteByInstr pseudocode).
2071 */
2072 switch (mode) {
2073 case ARM_CPU_MODE_USR:
2074 case ARM_CPU_MODE_SYS:
2075 case ARM_CPU_MODE_SVC:
2076 case ARM_CPU_MODE_ABT:
2077 case ARM_CPU_MODE_UND:
2078 case ARM_CPU_MODE_IRQ:
2079 case ARM_CPU_MODE_FIQ:
2080 return 0;
2081 default:
2082 return 1;
2083 }
2084}
2085
2f4a40e5
AZ
/* Reassemble the architectural CPSR from the cached flag fields
 * (NF/ZF/CF/VF/QF/GE/IT/Thumb) plus the uncached remainder.
 */
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);  /* env->ZF holds a value that is zero iff Z is set */
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}
2096
/* Write @val to the CPSR, but only the bits selected by @mask.
 * Cached fields (NZCV, Q, T, IT, GE) are decomposed into their CPUARMState
 * homes; a mode change triggers a banked-register switch via switch_mode().
 */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;   /* stored inverted: nonzero means Z clear */
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    /* The cached bits were handled above; only store the rest. */
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
2135
b26eefb6
PB
2136/* Sign/zero extend */
2137uint32_t HELPER(sxtb16)(uint32_t x)
2138{
2139 uint32_t res;
2140 res = (uint16_t)(int8_t)x;
2141 res |= (uint32_t)(int8_t)(x >> 16) << 16;
2142 return res;
2143}
2144
2145uint32_t HELPER(uxtb16)(uint32_t x)
2146{
2147 uint32_t res;
2148 res = (uint16_t)(uint8_t)x;
2149 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
2150 return res;
2151}
2152
f51bbbfe
PB
/* CLZ: count leading zeros of @x; returns 32 for x == 0 (clz32 semantics). */
uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}
2157
3670669c
PB
2158int32_t HELPER(sdiv)(int32_t num, int32_t den)
2159{
2160 if (den == 0)
2161 return 0;
686eeb93
AJ
2162 if (num == INT_MIN && den == -1)
2163 return INT_MIN;
3670669c
PB
2164 return num / den;
2165}
2166
2167uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
2168{
2169 if (den == 0)
2170 return 0;
2171 return num / den;
2172}
2173
2174uint32_t HELPER(rbit)(uint32_t x)
2175{
2176 x = ((x & 0xff000000) >> 24)
2177 | ((x & 0x00ff0000) >> 8)
2178 | ((x & 0x0000ff00) << 8)
2179 | ((x & 0x000000ff) << 24);
2180 x = ((x & 0xf0f0f0f0) >> 4)
2181 | ((x & 0x0f0f0f0f) << 4);
2182 x = ((x & 0x88888888) >> 3)
2183 | ((x & 0x44444444) >> 1)
2184 | ((x & 0x22222222) << 1)
2185 | ((x & 0x11111111) << 3);
2186 return x;
2187}
2188
5fafdf24 2189#if defined(CONFIG_USER_ONLY)
b5ff1b31 2190
/* User-mode emulation: exceptions are handled by the host process, so
 * just clear the pending exception index.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;
}
2198
/* User-mode emulation MMU fault handler: there is no MMU, so every fault
 * is raised to the guest.  @rw == 2 means an instruction fetch; record the
 * faulting address in the corresponding cp15 fault-address field and
 * return 1 (fault).
 */
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}
2211
9ee6e8bb 2212/* These should probably raise undefined insn exceptions. */
0ecb72a5 2213void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
9ee6e8bb
PB
2214{
2215 cpu_abort(env, "v7m_mrs %d\n", reg);
2216}
2217
/* v7M special-register read: unsupported in user-mode emulation. */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;  /* not reached; keeps the compiler happy */
}
2223
/* User-mode emulation: only USR mode exists; any other target mode
 * is a guest bug we cannot emulate.
 */
void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}
2229
/* User-mode emulation: banked registers don't exist outside USR mode. */
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}
2234
/* User-mode emulation: banked registers don't exist outside USR mode. */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;  /* not reached; keeps the compiler happy */
}
2240
b5ff1b31
FB
2241#else
2242
/* Map CPU modes onto saved register banks.  USR and SYS share bank 0
 * (they share r13/r14/SPSR); any other value is an invalid CPSR.M and
 * aborts via hw_error.
 */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}
2263
/* Switch the CPU into @mode: save the outgoing mode's banked r13/r14/SPSR
 * and load the incoming mode's.  FIQ additionally banks r8-r12, handled
 * by the memcpy pairs below.  No-op if already in @mode.
 */
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    /* r8-r12 are banked only for FIQ: swap them in/out when FIQ is
     * either the source or the destination mode.
     */
    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Save outgoing mode's banked state... */
    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    /* ...and load the incoming mode's. */
    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
2291
9ee6e8bb
PB
/* Push @val onto the current v7M stack (full-descending: decrement SP,
 * then store to physical memory).
 */
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}
2297
/* Pop a word from the current v7M stack (load from physical memory,
 * then increment SP).
 */
static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}
2305
/* Switch to V7M main or process stack pointer.  The inactive SP is kept
 * in v7m.other_sp; switching just exchanges it with r13.  No-op if the
 * requested stack is already current.
 */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}
2317
/* Perform a v7M exception return: r15 holds the EXC_RETURN magic value
 * (bit 2 selects the process stack), from which we pick the target stack
 * and unstack the r0-r3/r12/lr/pc/xPSR frame pushed at exception entry.
 */
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];  /* EXC_RETURN value */
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment (bit 9 of stacked xPSR records that entry
     * inserted a 4-byte pad).
     */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
2348
3f1beaca
PM
/* Exception names for debug logging; note that not all of these
 * precisely correspond to architectural exceptions.
 * Indexed by EXCP_* value; unlisted indices read as NULL and are
 * reported as "unknown" by arm_log_exception().
 */
static const char * const excnames[] = {
    [EXCP_UDEF] = "Undefined Instruction",
    [EXCP_SWI] = "SVC",
    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
    [EXCP_DATA_ABORT] = "Data Abort",
    [EXCP_IRQ] = "IRQ",
    [EXCP_FIQ] = "FIQ",
    [EXCP_BKPT] = "Breakpoint",
    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
    [EXCP_STREX] = "QEMU intercept of STREX",
};
2364
/* Log "Taking exception N [name]" under CPU_LOG_INT; indices with no
 * entry in excnames[] (or out of range) are logged as "unknown".
 */
static inline void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
2379
/* v7M exception entry.  Most exceptions are simply marked pending on the
 * NVIC; IRQ is acknowledged immediately and falls through to the frame
 * push below, which stacks r0-r3/r12/lr/pc/xPSR and vectors via the
 * vector table at v7m.vecbase.
 */
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(env->exception_index);

    /* Build the EXC_RETURN value: bit 2 = was on process stack,
     * bit 3 = was in thread mode (no active exception).
     */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            /* BKPT 0xab is the Thumb semihosting trap. */
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;  /* record the pad so exception exit can undo it */
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    /* Fetch the handler address from the vector table; bit 0 selects
     * the Thumb state (always set for valid v7M code).
     */
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
2460
/* Handle a CPU exception (A/R-profile): pick the target mode, vector
 * offset and CPSR interrupt-mask bits for the exception, optionally
 * intercept semihosting traps, then switch mode, save the return state
 * and jump to the vector.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;      /* vector address (offset, then rebased below) */
    uint32_t mask;      /* CPSR bits to set (interrupts to disable) */
    int new_mode;       /* target ARM_CPU_MODE_* */
    uint32_t offset;    /* correction applied to r15 to form the return lr */

    /* M-profile takes the arm_v7m_cpu_do_interrupt() path instead. */
    assert(!IS_M(env));

    arm_log_exception(env->exception_index);

    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        /* lr must point at the undefined insn: Thumb insns are 2 bytes. */
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        env->cp15.c5_insn = 2;  /* IFSR: debug event */
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.c5_insn, env->cp15.c6_insn);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->cp15.c5_data, env->cp15.c6_data);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors (SCTLR.V, bit 13).  */
    if (env->cp15.c1_sys & (1 << 13)) {
        /* when enabled, base address cannot be remapped.  */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and un-secure copy. Since the cpu is always in a un-secure operation
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.c12_vbar;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
2590
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.
   NOTE(review): access_type == 1 appears to mean "write" (it zeroes
   prot_ro and denies ap==0) -- confirm against callers.  */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    /* Domain "manager": all accesses allowed, permissions not checked. */
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        /* AP==0 read permission depends on SCTLR S/R bits (bits 8-9). */
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        /* AP==7 (privileged+user read-only) is only valid from V6K on. */
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}
2643
/* Compute the physical address of the level-1 page-table descriptor for
 * @address: select TTBR1 (c2_base1) or TTBR0 (c2_base0) via the TTBCR-derived
 * c2_mask, then index by the top address bits.
 */
static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    /* Each descriptor is 4 bytes; index = address[31:20]. */
    table |= (address >> 18) & 0x3ffc;
    return table;
}
2656
/* ARMv5-format (short descriptor, no XN) page table walk.
 * On success returns 0 and fills *phys_ptr, *prot and *page_size.
 * On failure returns the fault status code with the domain in bits [7:4],
 * suitable for the DFSR/IFSR.
 */
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;  /* DACR field */
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        /* Domain "no access" (0) or reserved (2). */
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;  /* section permission fault, if check_ap fails below */
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            /* Pick the AP subfield for this 16k quarter of the page. */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    /* XScale treats this as a 4k "extended small page". */
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;  /* page permission fault, if check_ap fails below */
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    /* v5 format has no execute-never bit, so anything readable is
     * executable.
     */
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
2751
/* Walk the ARMv6/v7 short-descriptor (XP) page tables for @address.
 *
 * On success returns 0 and fills in *phys_ptr, *prot and *page_size.
 * On failure returns a short-format DFSR/IFSR fault status value:
 * fault code in bits [3:0], domain in bits [7:4].
 *
 * @access_type: 0 read, 1 write, 2 execute (matches get_phys_addr).
 * @is_user: nonzero for an unprivileged access.
 */
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. (Supersections have no domain field.) */
        domain = (desc >> 5) & 0x0f;
    }
    /* Two-bit access field for this domain from the DACR (cp15.c3). */
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault. */
        } else {
            code = 11; /* Page domain fault. */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        /* Domain in "manager" mode: no permission checks at all. */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            /* Privileged-execute-never: treat as XN for PL1. */
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit. */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault. */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault. */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
2863
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;

/* Walk the LPAE long-descriptor translation tables for @address.
 *
 * On success returns 0 and fills in *phys_ptr, *prot and *page_size_ptr.
 * On failure returns a long-descriptor-format DFSR/IFSR value (the
 * LPAE bit, fault type and the level at which the walk stopped).
 */
static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    address &= (0xffffffffU >> tsz);

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        /* Merge in the next 9 (or fewer, at the first level) VA bits
         * as the descriptor index for this level.
         */
        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
3041
/* Look up @address in the (identity-mapped) MPU regions and compute
 * the permitted access.
 *
 * Returns 0 on success (with *phys_ptr and *prot set), 2 if no region
 * matches, or 1 on a permission fault.  Regions are searched from 7
 * down to 0 so higher-numbered regions take priority.
 */
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    /* The MPU does not translate: physical == virtual. */
    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        /* Bit 0 is the region enable bit. */
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    /* Pull this region's 4-bit permission field from the relevant
     * access-permission register (instruction vs data).
     */
    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        /* Privileged read/write, no user access. */
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        /* Privileged read/write, user read-only. */
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        /* Full access. */
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        /* Privileged read-only, no user access. */
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        /* Read-only for everyone. */
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
3102
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension. */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    /* cp15.c1_sys bit 0 is the MMU/MPU enable (SCTLR.M). */
    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        /* SCTLR bit 23 set: use the v6 (XP) short-descriptor format.
         * NOTE(review): presumably the SCTLR.XP bit — confirm vs ARM ARM.
         */
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}
3156
/* Handle a TLB miss: translate @address and either install the mapping
 * in QEMU's TLB (returning 0) or record the fault in the cp15 fault
 * status/address registers and set up the abort exception (returning 1).
 *
 * @access_type: 0 read, 1 write, 2 execute.
 * @mmu_idx: QEMU MMU index; MMU_USER_IDX means an unprivileged access.
 */
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        /* Instruction fetch: record in IFSR/IFAR equivalents. */
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        /* Data access: record in DFSR/DFAR equivalents. */
        env->cp15.c5_data = ret;
        /* On v6+, set the WnR bit in the DFSR for writes. */
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
3189
00b941e5 3190hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
b5ff1b31 3191{
00b941e5 3192 ARMCPU *cpu = ARM_CPU(cs);
a8170e5e 3193 hwaddr phys_addr;
d4c430a8 3194 target_ulong page_size;
b5ff1b31
FB
3195 int prot;
3196 int ret;
3197
00b941e5 3198 ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);
b5ff1b31 3199
00b941e5 3200 if (ret != 0) {
b5ff1b31 3201 return -1;
00b941e5 3202 }
b5ff1b31
FB
3203
3204 return phys_addr;
3205}
3206
0ecb72a5 3207void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
9ee6e8bb 3208{
39ea3d4e
PM
3209 if ((env->uncached_cpsr & CPSR_M) == mode) {
3210 env->regs[13] = val;
3211 } else {
f5206413 3212 env->banked_r13[bank_number(mode)] = val;
39ea3d4e 3213 }
9ee6e8bb
PB
3214}
3215
0ecb72a5 3216uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
9ee6e8bb 3217{
39ea3d4e
PM
3218 if ((env->uncached_cpsr & CPSR_M) == mode) {
3219 return env->regs[13];
3220 } else {
f5206413 3221 return env->banked_r13[bank_number(mode)];
39ea3d4e 3222 }
9ee6e8bb
PB
3223}
3224
/* M-profile MRS: read the special register selected by @reg.
 * The PSR variants return xpsr_read() masked down to the view that
 * the particular register exposes.  Unknown @reg aborts (debug aid).
 */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        /* current_sp selects which stack is live in regs[13]. */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        /* PRIMASK is modelled via the CPSR I bit. */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        /* FAULTMASK is modelled via the CPSR F bit. */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
3261
/* M-profile MSR: write the special register selected by @reg.
 * PSR variants write through xpsr_write() with a mask of the bits that
 * this register view may modify.  Unknown @reg aborts (debug aid).
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        /* current_sp selects which stack is live in regs[13]. */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        /* PRIMASK is modelled via the CPSR I bit. */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        /* Only raise the priority (lower the value), never lower it. */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        /* FAULTMASK is modelled via the CPSR F bit. */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        /* Bit 1 selects the process stack; switch if needed. */
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
3328
b5ff1b31 3329#endif
6ddbc6e4
PB
3330
3331/* Note that signed overflow is undefined in C. The following routines are
3332 careful to use unsigned types where modulo arithmetic is required.
3333 Failure to do so _will_ break on newer gcc. */
3334
3335/* Signed saturating arithmetic. */
3336
/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Overflow iff the operands share a sign and the sum's sign differs. */
    if (((sum ^ a) & ~(a ^ b)) & 0x8000) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
3351
/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Overflow iff the operands share a sign and the sum's sign differs. */
    if (((sum ^ a) & ~(a ^ b)) & 0x80) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
3366
/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    /* Overflow iff the operand signs differ and the result's sign
     * differs from a's.
     */
    if (((diff ^ a) & (a ^ b)) & 0x8000) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
3381
/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    /* Overflow iff the operand signs differ and the result's sign
     * differs from a's.
     */
    if (((diff ^ a) & (a ^ b)) & 0x80) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
3396
/* Instantiate the per-lane templates in op_addsub.h with the signed
 * saturating lane operations above (helper name prefix "q").
 */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
3404
/* Unsigned saturating arithmetic. */
/* Perform 16-bit unsigned saturating addition. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint32_t sum = (uint32_t)a + b;

    return (sum > 0xffff) ? 0xffff : (uint16_t)sum;
}
3414
/* Perform 16-bit unsigned saturating subtraction (floor at zero). */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return (a > b) ? (a - b) : 0;
}
3422
/* Perform 8-bit unsigned saturating addition. */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint32_t sum = (uint32_t)a + b;

    return (sum > 0xff) ? 0xff : (uint8_t)sum;
}
3431
/* Perform 8-bit unsigned saturating subtraction (floor at zero). */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return (a > b) ? (a - b) : 0;
}
3439
/* Instantiate the per-lane templates in op_addsub.h with the unsigned
 * saturating lane operations above (helper name prefix "uq").
 */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
3447
/* Signed modulo arithmetic. */
/* Per-lane signed add/sub: performs the lane operation in 32 bits,
 * stores the (wrapped) result, and sets the GE flag(s) for the lane
 * when the full-precision sum is non-negative.
 */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


/* Instantiate op_addsub.h with GE-flag setting (prefix "s"). */
#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
3474
/* Unsigned modulo arithmetic. */
/* Per-lane unsigned add/sub: GE is set on carry-out for addition and
 * on no-borrow for subtraction (the extra bit of the 32-bit result).
 */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

/* Instantiate op_addsub.h with GE-flag setting (prefix "u"). */
#define PFX u
#define ARITH_GE

#include "op_addsub.h"
3512
/* Halved signed arithmetic. */
/* Each lane computes the full-precision signed result and arithmetic
 * shifts right by one (halving); instantiated with prefix "sh".
 */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
3525
/* Halved unsigned arithmetic. */
/* Each lane computes the full-precision unsigned result and shifts
 * right by one (halving); instantiated with prefix "uh".
 */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
3538
/* Absolute difference of two unsigned bytes. */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return (a > b) ? (a - b) : (b - a);
}
3546
3547/* Unsigned sum of absolute byte differences. */
3548uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
3549{
3550 uint32_t sum;
3551 sum = do_usad(a, b);
3552 sum += do_usad(a >> 8, b >> 8);
3553 sum += do_usad(a >> 16, b >>16);
3554 sum += do_usad(a >> 24, b >> 24);
3555 return sum;
3556}
3557
3558/* For ARMv6 SEL instruction. */
3559uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
3560{
3561 uint32_t mask;
3562
3563 mask = 0;
3564 if (flags & 1)
3565 mask |= 0xff;
3566 if (flags & 2)
3567 mask |= 0xff00;
3568 if (flags & 4)
3569 mask |= 0xff0000;
3570 if (flags & 8)
3571 mask |= 0xff000000;
3572 return (a & mask) | (b & ~mask);
3573}
3574
b90372ad
PM
3575/* VFP support. We follow the convention used for VFP instructions:
3576 Single precision routines have a "s" suffix, double precision a
4373f3ce
PB
3577 "d" suffix. */
3578
3579/* Convert host exception flags to vfp form. */
3580static inline int vfp_exceptbits_from_host(int host_bits)
3581{
3582 int target_bits = 0;
3583
3584 if (host_bits & float_flag_invalid)
3585 target_bits |= 1;
3586 if (host_bits & float_flag_divbyzero)
3587 target_bits |= 2;
3588 if (host_bits & float_flag_overflow)
3589 target_bits |= 4;
36802b6b 3590 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
4373f3ce
PB
3591 target_bits |= 8;
3592 if (host_bits & float_flag_inexact)
3593 target_bits |= 0x10;
cecd8504
PM
3594 if (host_bits & float_flag_input_denormal)
3595 target_bits |= 0x80;
4373f3ce
PB
3596 return target_bits;
3597}
3598
/* Assemble the FPSCR value from its scattered storage: the stored
 * register bits, the vector len/stride fields, and the accumulated
 * softfloat exception flags from both FP status blocks.
 */
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    /* Mask out LEN (bits 18:16) and STRIDE (bits 21:20), which are
     * kept unpacked in vec_len/vec_stride.
     */
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    /* Merge flags raised by Neon "standard FPSCR" operations too. */
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}
3612
/* Externally-linkable wrapper around the FPSCR read helper. */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
3617
4373f3ce
PB
3618/* Convert vfp exception flags to target form. */
3619static inline int vfp_exceptbits_to_host(int target_bits)
3620{
3621 int host_bits = 0;
3622
3623 if (target_bits & 1)
3624 host_bits |= float_flag_invalid;
3625 if (target_bits & 2)
3626 host_bits |= float_flag_divbyzero;
3627 if (target_bits & 4)
3628 host_bits |= float_flag_overflow;
3629 if (target_bits & 8)
3630 host_bits |= float_flag_underflow;
3631 if (target_bits & 0x10)
3632 host_bits |= float_flag_inexact;
cecd8504
PM
3633 if (target_bits & 0x80)
3634 host_bits |= float_flag_input_denormal;
4373f3ce
PB
3635 return host_bits;
3636}
3637
/* Write the FPSCR, unpacking the control fields into their dedicated
 * storage and pushing rounding/denormal/exception settings down into
 * the softfloat status blocks.  Only fields whose value actually
 * changed cause softfloat reconfiguration.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    /* LEN and STRIDE are stored unpacked, not in the register copy. */
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        /* RMode field (bits 23:22) changed: update rounding mode. */
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        /* FZ bit: flush denormals on both outputs and inputs. */
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        /* DN bit: propagate default-NaN mode. */
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    /* Replace the accumulated softfloat flags with the written
     * cumulative exception bits; the standard (Neon) status block's
     * flags were already folded into the read value, so clear them.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
3678
/* Externally-linkable wrapper around the FPSCR write helper. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
3683
/* Build the canonical vfp_<name><p> helper symbol name. */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate single- and double-precision binary-op helpers that take an
 * explicit float_status pointer (passed as void * from TCG).
 */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
3702
/* Unary VFP helpers: negate and absolute value are pure sign-bit
 * operations; sqrt uses the CPU's FP status for rounding/exceptions.
 */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
3732
/* XXX: check quiet/signaling case */
/* Generate cmp (quiet) and cmpe (signalling) comparison helpers.
 * The softfloat compare result (0 equal, -1 less, 1 greater, 2
 * unordered) is mapped to NZCV flag nibbles and written to the top
 * four bits of the FPSCR.
 */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
3762
/* Integer to float and float to integer conversions */

/* CONV_ITOF: define a helper converting a 32-bit (un)signed integer to
 * a float of fsz bits, using the float_status passed via fpstp.
 */
#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

/* CONV_FTOI: define a helper converting a float of fsz bits to a
 * 32-bit (un)signed integer. A NaN input raises Invalid Operation and
 * converts to zero.
 */
#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

/* Instantiate int->float, float->int (status-controlled rounding) and
 * float->int round-to-zero (the "z" variants) for one combination.
 */
#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
4373f3ce
PB
3796
3797/* floating point conversion */
0ecb72a5 3798float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
4373f3ce 3799{
2d627737
PM
3800 float64 r = float32_to_float64(x, &env->vfp.fp_status);
3801 /* ARM requires that S<->D conversion of any kind of NaN generates
3802 * a quiet NaN by forcing the most significant frac bit to 1.
3803 */
3804 return float64_maybe_silence_nan(r);
4373f3ce
PB
3805}
3806
0ecb72a5 3807float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
4373f3ce 3808{
2d627737
PM
3809 float32 r = float64_to_float32(x, &env->vfp.fp_status);
3810 /* ARM requires that S<->D conversion of any kind of NaN generates
3811 * a quiet NaN by forcing the most significant frac bit to 1.
3812 */
3813 return float32_maybe_silence_nan(r);
4373f3ce
PB
3814}
3815
/* VFP3 fixed point conversion. */
/* Define a pair of helpers converting between a float of fsz bits and
 * a fixed point value held in an itype integer with "shift" fraction
 * bits:
 *  - vfp_<name>to<p>: int -> float, scaling by 2^-shift afterwards.
 *  - vfp_to<name><p>: float -> int, scaling by 2^shift then truncating
 *    (round to zero); a NaN input raises Invalid and converts to 0.
 */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
} \
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
                                      void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
}

VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
3848
60011498 3849/* Half precision conversions. */
0ecb72a5 3850static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
60011498 3851{
60011498 3852 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
fb91678d
PM
3853 float32 r = float16_to_float32(make_float16(a), ieee, s);
3854 if (ieee) {
3855 return float32_maybe_silence_nan(r);
3856 }
3857 return r;
60011498
PB
3858}
3859
0ecb72a5 3860static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
60011498 3861{
60011498 3862 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
fb91678d
PM
3863 float16 r = float32_to_float16(a, ieee, s);
3864 if (ieee) {
3865 r = float16_maybe_silence_nan(r);
3866 }
3867 return float16_val(r);
60011498
PB
3868}
3869
0ecb72a5 3870float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2d981da7
PM
3871{
3872 return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
3873}
3874
0ecb72a5 3875uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2d981da7
PM
3876{
3877 return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
3878}
3879
0ecb72a5 3880float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2d981da7
PM
3881{
3882 return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
3883}
3884
0ecb72a5 3885uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2d981da7
PM
3886{
3887 return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
3888}
3889
dda3ec49 3890#define float32_two make_float32(0x40000000)
6aae3df1
PM
3891#define float32_three make_float32(0x40400000)
3892#define float32_one_point_five make_float32(0x3fc00000)
dda3ec49 3893
0ecb72a5 3894float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
4373f3ce 3895{
dda3ec49
PM
3896 float_status *s = &env->vfp.standard_fp_status;
3897 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
3898 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
43fe9bdb
PM
3899 if (!(float32_is_zero(a) || float32_is_zero(b))) {
3900 float_raise(float_flag_input_denormal, s);
3901 }
dda3ec49
PM
3902 return float32_two;
3903 }
3904 return float32_sub(float32_two, float32_mul(a, b, s), s);
4373f3ce
PB
3905}
3906
0ecb72a5 3907float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
4373f3ce 3908{
71826966 3909 float_status *s = &env->vfp.standard_fp_status;
9ea62f57
PM
3910 float32 product;
3911 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
3912 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
43fe9bdb
PM
3913 if (!(float32_is_zero(a) || float32_is_zero(b))) {
3914 float_raise(float_flag_input_denormal, s);
3915 }
6aae3df1 3916 return float32_one_point_five;
9ea62f57 3917 }
6aae3df1
PM
3918 product = float32_mul(a, b, s);
3919 return float32_div(float32_sub(float32_three, product, s), float32_two, s);
4373f3ce
PB
3920}
3921
8f8e3aa4
PB
3922/* NEON helpers. */
3923
56bf4fe2
CL
3924/* Constants 256 and 512 are used in some helpers; we avoid relying on
3925 * int->float conversions at run-time. */
3926#define float64_256 make_float64(0x4070000000000000LL)
3927#define float64_512 make_float64(0x4080000000000000LL)
3928
fe0e4872
CL
3929/* The algorithm that must be used to calculate the estimate
3930 * is specified by the ARM ARM.
3931 */
0ecb72a5 3932static float64 recip_estimate(float64 a, CPUARMState *env)
fe0e4872 3933{
1146a817
PM
3934 /* These calculations mustn't set any fp exception flags,
3935 * so we use a local copy of the fp_status.
3936 */
3937 float_status dummy_status = env->vfp.standard_fp_status;
3938 float_status *s = &dummy_status;
fe0e4872
CL
3939 /* q = (int)(a * 512.0) */
3940 float64 q = float64_mul(float64_512, a, s);
3941 int64_t q_int = float64_to_int64_round_to_zero(q, s);
3942
3943 /* r = 1.0 / (((double)q + 0.5) / 512.0) */
3944 q = int64_to_float64(q_int, s);
3945 q = float64_add(q, float64_half, s);
3946 q = float64_div(q, float64_512, s);
3947 q = float64_div(float64_one, q, s);
3948
3949 /* s = (int)(256.0 * r + 0.5) */
3950 q = float64_mul(q, float64_256, s);
3951 q = float64_add(q, float64_half, s);
3952 q_int = float64_to_int64_round_to_zero(q, s);
3953
3954 /* return (double)s / 256.0 */
3955 return float64_div(int64_to_float64(q_int, s), float64_256, s);
3956}
3957
/* Floating point reciprocal estimate: handle the special cases here,
 * then delegate the mantissa estimate to recip_estimate() above.
 */
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        /* Only signaling NaNs raise Invalid; either way return dNaN. */
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        /* 1/inf = signed zero. */
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        /* Denormals are treated as zero but still flagged. */
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        /* 1/0 = signed infinity, with Division by Zero raised. */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        /* Result would be too small to represent: signed zero plus
         * Underflow.
         */
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    /* Repack the fraction into a positive double with exponent 0x3fe,
     * i.e. a value in [0.5, 1.0).
     */
    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    /* Reassemble: original sign, adjusted exponent, estimated fraction. */
    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}
3998
e07be5d2
CL
3999/* The algorithm that must be used to calculate the estimate
4000 * is specified by the ARM ARM.
4001 */
0ecb72a5 4002static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
e07be5d2 4003{
1146a817
PM
4004 /* These calculations mustn't set any fp exception flags,
4005 * so we use a local copy of the fp_status.
4006 */
4007 float_status dummy_status = env->vfp.standard_fp_status;
4008 float_status *s = &dummy_status;
e07be5d2
CL
4009 float64 q;
4010 int64_t q_int;
4011
4012 if (float64_lt(a, float64_half, s)) {
4013 /* range 0.25 <= a < 0.5 */
4014
4015 /* a in units of 1/512 rounded down */
4016 /* q0 = (int)(a * 512.0); */
4017 q = float64_mul(float64_512, a, s);
4018 q_int = float64_to_int64_round_to_zero(q, s);
4019
4020 /* reciprocal root r */
4021 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
4022 q = int64_to_float64(q_int, s);
4023 q = float64_add(q, float64_half, s);
4024 q = float64_div(q, float64_512, s);
4025 q = float64_sqrt(q, s);
4026 q = float64_div(float64_one, q, s);
4027 } else {
4028 /* range 0.5 <= a < 1.0 */
4029
4030 /* a in units of 1/256 rounded down */
4031 /* q1 = (int)(a * 256.0); */
4032 q = float64_mul(float64_256, a, s);
4033 int64_t q_int = float64_to_int64_round_to_zero(q, s);
4034
4035 /* reciprocal root r */
4036 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
4037 q = int64_to_float64(q_int, s);
4038 q = float64_add(q, float64_half, s);
4039 q = float64_div(q, float64_256, s);
4040 q = float64_sqrt(q, s);
4041 q = float64_div(float64_one, q, s);
4042 }
4043 /* r in units of 1/256 rounded to nearest */
4044 /* s = (int)(256.0 * r + 0.5); */
4045
4046 q = float64_mul(q, float64_256,s );
4047 q = float64_add(q, float64_half, s);
4048 q_int = float64_to_int64_round_to_zero(q, s);
4049
4050 /* return (double)s / 256.0;*/
4051 return float64_div(int64_to_float64(q_int, s), float64_256, s);
4052}
4053
/* Floating point reciprocal square root estimate: handle the special
 * cases here, then delegate to recip_sqrt_estimate() above.
 */
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        /* Only signaling NaNs raise Invalid; either way return dNaN. */
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        /* Denormals are treated as zero but still flagged. */
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        /* 1/sqrt(0) = signed infinity, with Division by Zero raised. */
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        /* sqrt of a negative value is invalid. */
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    /* Reassemble: result is non-negative, so the sign bit stays clear. */
    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
4104
0ecb72a5 4105uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
4373f3ce 4106{
fe0e4872
CL
4107 float64 f64;
4108
4109 if ((a & 0x80000000) == 0) {
4110 return 0xffffffff;
4111 }
4112
4113 f64 = make_float64((0x3feULL << 52)
4114 | ((int64_t)(a & 0x7fffffff) << 21));
4115
4116 f64 = recip_estimate (f64, env);
4117
4118 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
4373f3ce
PB
4119}
4120
0ecb72a5 4121uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
4373f3ce 4122{
e07be5d2
CL
4123 float64 f64;
4124
4125 if ((a & 0xc0000000) == 0) {
4126 return 0xffffffff;
4127 }
4128
4129 if (a & 0x80000000) {
4130 f64 = make_float64((0x3feULL << 52)
4131 | ((uint64_t)(a & 0x7fffffff) << 21));
4132 } else { /* bits 31-30 == '01' */
4133 f64 = make_float64((0x3fdULL << 52)
4134 | ((uint64_t)(a & 0x3fffffff) << 22));
4135 }
4136
4137 f64 = recip_sqrt_estimate(f64, env);
4138
4139 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
4373f3ce 4140}
fe1479c3 4141
da97f52c
PM
4142/* VFPv4 fused multiply-accumulate */
4143float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
4144{
4145 float_status *fpst = fpstp;
4146 return float32_muladd(a, b, c, 0, fpst);
4147}
4148
4149float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
4150{
4151 float_status *fpst = fpstp;
4152 return float64_muladd(a, b, c, 0, fpst);
4153}
40cfacdd
WN
4154
4155/* ARMv8 VMAXNM/VMINNM */
4156float32 VFP_HELPER(maxnm, s)(float32 a, float32 b, void *fpstp)
4157{
4158 float_status *fpst = fpstp;
4159 return float32_maxnum(a, b, fpst);
4160}
4161
4162float64 VFP_HELPER(maxnm, d)(float64 a, float64 b, void *fpstp)
4163{
4164 float_status *fpst = fpstp;
4165 return float64_maxnum(a, b, fpst);
4166}
4167
4168float32 VFP_HELPER(minnm, s)(float32 a, float32 b, void *fpstp)
4169{
4170 float_status *fpst = fpstp;
4171 return float32_minnum(a, b, fpst);
4172}
4173
4174float64 VFP_HELPER(minnm, d)(float64 a, float64 b, void *fpstp)
4175{
4176 float_status *fpst = fpstp;
4177 return float64_minnum(a, b, fpst);
4178}