]> git.proxmox.com Git - qemu.git/blob - target-arm/helper.c
Merge branch 'arm-devs.for-upstream' of git://git.linaro.org/people/pmaydell/qemu-arm
[qemu.git] / target-arm / helper.c
1 #include "cpu.h"
2 #include "gdbstub.h"
3 #include "helper.h"
4 #include "host-utils.h"
5 #if !defined(CONFIG_USER_ONLY)
6 #include "hw/loader.h"
7 #endif
8 #include "sysemu.h"
9
/* Reset values for the CP15 c0 ID register blocks, indexed by opc2 (0..7).
 * The *_c0_c1 arrays are the CRm=1 group (processor/debug/aux/memory-model
 * feature registers) and the *_c0_c2 arrays are the CRm=2 group (instruction
 * set attribute registers), one table per emulated core.  Values are copied
 * into env->cp15.c0_c1 / c0_c2 by cpu_reset_model_id().
 */
static uint32_t cortexa15_cp15_c0_c1[8] = {
    0x00001131, 0x00011011, 0x02010555, 0x00000000,
    0x10201105, 0x20000000, 0x01240000, 0x02102211
};

static uint32_t cortexa15_cp15_c0_c2[8] = {
    0x02101110, 0x13112111, 0x21232041, 0x11112131, 0x10011142, 0, 0, 0
};

static uint32_t cortexa9_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };

static uint32_t cortexa9_cp15_c0_c2[8] =
{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1176_cp15_c0_c1[8] =
{ 0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0 };

static uint32_t arm1176_cp15_c0_c2[8] =
{ 0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0 };
48
49 static uint32_t cpu_arm_find_by_name(const char *name);
50
51 static inline void set_feature(CPUARMState *env, int feature)
52 {
53 env->features |= 1u << feature;
54 }
55
/* Initialize the model-dependent parts of ENV for the CPU identified by ID:
 * the feature bitmap, the VFP/iwMMXt ID registers, and the CP15 ID and
 * control register reset values.  Aborts on an unknown ID.
 */
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136:
        /* This is the 1136 r1, which is a v6K core */
        set_feature(env, ARM_FEATURE_V6K);
        /* Fall through */
    case ARM_CPUID_ARM1136_R2:
        /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
         * older core than plain "arm1136". In particular this does not
         * have the v6K features.
         */
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        /* These ID register values are correct for 1136 but may be wrong
         * for 1136_r2 (in particular r0p2 does not actually implement most
         * of the ID registers).
         */
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM1176:
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VAPA);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00050078;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VAPA);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x82048004;
        /* CLIDR: one level of I+D cache, one unified level below it. */
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXA9:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        /* Note that A9 supports the MP extensions even for
         * A9UP and single-core A9MP (which are both different
         * and valid configurations; we don't model A9UP).
         */
        set_feature(env, ARM_FEATURE_V7MP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41033090;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x80038003;
        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXA15:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP4);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_ARM_DIV);
        set_feature(env, ARM_FEATURE_V7MP);
        set_feature(env, ARM_FEATURE_GENERIC_TIMER);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410430f0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x10110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x11111111;
        memcpy(env->cp15.c0_c1, cortexa15_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa15_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x8444c004;
        env->cp15.c0_clid = 0x0a200023;
        env->cp15.c0_ccsid[0] = 0x701fe00a; /* 32K L1 dcache */
        env->cp15.c0_ccsid[1] = 0x201fe00a; /* 32K L1 icache */
        env->cp15.c0_ccsid[2] = 0x711fe07a; /* 4096K L2 unified cache */
        env->cp15.c1_sys = 0x00c50078;
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation. */
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_VFP4);
        set_feature(env, ARM_FEATURE_VFP_FP16);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_ARM_DIV);
        set_feature(env, ARM_FEATURE_V7MP);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_V4T);
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring. */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_V5);
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_SA1100:
    case ARM_CPUID_SA1110:
        set_feature(env, ARM_FEATURE_STRONGARM);
        env->cp15.c1_sys = 0x00000070;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }

    /* Some features automatically imply others: */
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            /* v7-M implies v6 but not the v6K extensions. */
            set_feature(env, ARM_FEATURE_V6);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DIV);
    }
    if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
        set_feature(env, ARM_FEATURE_THUMB_DIV);
    }
    if (arm_feature(env, ARM_FEATURE_VFP4)) {
        set_feature(env, ARM_FEATURE_VFP3);
    }
    if (arm_feature(env, ARM_FEATURE_VFP3)) {
        set_feature(env, ARM_FEATURE_VFP);
    }
}
281
282 /* TODO Move contents into arm_cpu_reset() in cpu.c,
283 * once cpu_reset_model_id() is eliminated,
284 * and then forward to cpu_reset() here.
285 */
/* Reset the CPU state in ENV to its power-on values, preserving the CPUID
 * and the CP15 configuration base address across the memset.  Also resets
 * the FP status flags and flushes the TLB and translated-code caches.
 */
void cpu_state_reset(CPUARMState *env)
{
    uint32_t id;
    uint32_t tmp = 0;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* Save the fields that must survive the wholesale memset below. */
    id = env->cp15.c0_cpuid;
    tmp = env->cp15.c15_config_base_address;
    /* Only the state before 'breakpoints' is architectural and gets cleared. */
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
    env->cp15.c15_config_base_address = tmp;
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  Initial SP and PC are loaded from ROM.  */
    if (IS_M(env)) {
        uint32_t pc;
        uint8_t *rom;
        env->uncached_cpsr &= ~CPSR_I;
        rom = rom_ptr(0);
        if (rom) {
            /* We should really use ldl_phys here, in case the guest
               modified flash and reset itself.  However images
               loaded via -kernel have not been copied yet, so load the
               values directly from there.  */
            env->regs[13] = ldl_p(rom);
            pc = ldl_p(rom + 4);
            env->thumb = pc & 1;   /* Vector table PC bit 0 selects Thumb. */
            env->regs[15] = pc & ~1;
        }
    }
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
    /* v7 performance monitor control register: same implementor
     * field as main ID register, and we implement no event counters.
     */
    env->cp15.c9_pmcr = (id & 0xff000000);
#endif
    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    tlb_flush(env, 1);
    /* Reset is a state change for some CPUARMState fields which we
     * bake assumptions about into translated code, so we need to
     * tb_flush().
     */
    tb_flush(env);
}
353
354 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
355 {
356 int nregs;
357
358 /* VFP data registers are always little-endian. */
359 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
360 if (reg < nregs) {
361 stfq_le_p(buf, env->vfp.regs[reg]);
362 return 8;
363 }
364 if (arm_feature(env, ARM_FEATURE_NEON)) {
365 /* Aliases for Q regs. */
366 nregs += 16;
367 if (reg < nregs) {
368 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
369 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
370 return 16;
371 }
372 }
373 switch (reg - nregs) {
374 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
375 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
376 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
377 }
378 return 0;
379 }
380
381 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
382 {
383 int nregs;
384
385 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
386 if (reg < nregs) {
387 env->vfp.regs[reg] = ldfq_le_p(buf);
388 return 8;
389 }
390 if (arm_feature(env, ARM_FEATURE_NEON)) {
391 nregs += 16;
392 if (reg < nregs) {
393 env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
394 env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
395 return 16;
396 }
397 }
398 switch (reg - nregs) {
399 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
400 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
401 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
402 }
403 return 0;
404 }
405
/* Create and initialize an ARM CPU for the model named CPU_MODEL.
 * Returns the new CPU's state, or NULL if the model name is unknown.
 * Also registers the appropriate VFP/Neon gdbstub coprocessor XML
 * depending on the features the model implements.
 */
CPUARMState *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    CPUARMState *env;
    uint32_t id;
    static int inited = 0;   /* TCG translator is initialized only once */

    id = cpu_arm_find_by_name(cpu_model);
    if (id == 0)
        return NULL;
    cpu = ARM_CPU(object_new(TYPE_ARM_CPU));
    env = &cpu->env;
    cpu_exec_init(env);
    if (tcg_enabled() && !inited) {
        inited = 1;
        arm_translate_init();
    }

    env->cpu_model_str = cpu_model;
    /* Set the CPUID before reset so cpu_reset_model_id() can use it. */
    env->cp15.c0_cpuid = id;
    cpu_state_reset(env);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}
440
/* Mapping from a user-visible CPU model name (the -cpu option) to the
 * CPUID value that selects it in cpu_reset_model_id().
 */
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};
445
/* Table of all supported CPU models; terminated by a { 0, NULL } sentinel.
 * Scanned by cpu_arm_find_by_name() and printed by arm_cpu_list().
 */
static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
    { ARM_CPUID_ARM1176, "arm1176"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_CORTEXA9, "cortex-a9"},
    { ARM_CPUID_CORTEXA15, "cortex-a15" },
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_SA1100, "sa1100" },
    { ARM_CPUID_SA1110, "sa1110" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { ARM_CPUID_ANY, "any"},
    { 0, NULL}
};
476
477 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
478 {
479 int i;
480
481 (*cpu_fprintf)(f, "Available CPUs:\n");
482 for (i = 0; arm_cpu_names[i].name; i++) {
483 (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name);
484 }
485 }
486
487 /* return 0 if not found */
488 static uint32_t cpu_arm_find_by_name(const char *name)
489 {
490 int i;
491 uint32_t id;
492
493 id = 0;
494 for (i = 0; arm_cpu_names[i].name; i++) {
495 if (strcmp(name, arm_cpu_names[i].name) == 0) {
496 id = arm_cpu_names[i].id;
497 break;
498 }
499 }
500 return id;
501 }
502
503 static int bad_mode_switch(CPUARMState *env, int mode)
504 {
505 /* Return true if it is not valid for us to switch to
506 * this CPU mode (ie all the UNPREDICTABLE cases in
507 * the ARM ARM CPSRWriteByInstr pseudocode).
508 */
509 switch (mode) {
510 case ARM_CPU_MODE_USR:
511 case ARM_CPU_MODE_SYS:
512 case ARM_CPU_MODE_SVC:
513 case ARM_CPU_MODE_ABT:
514 case ARM_CPU_MODE_UND:
515 case ARM_CPU_MODE_IRQ:
516 case ARM_CPU_MODE_FIQ:
517 return 0;
518 default:
519 return 1;
520 }
521 }
522
/* Assemble the architectural CPSR value from the separately cached flag
 * fields (NF/ZF/CF/VF/QF, GE, IT bits, Thumb bit) plus uncached_cpsr,
 * which holds the remaining (mode etc.) bits.
 */
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    /* env->ZF caches a value that is zero iff the Z flag is set. */
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}
533
/* Update the CPSR from VAL, but only the bits selected by MASK.
 * The cached flag fields are updated individually; bits covered by
 * CACHED_CPSR_BITS are never written into uncached_cpsr.  A mode-field
 * change additionally triggers a register-bank switch, unless the new
 * mode is invalid, in which case the M field is left untouched.
 */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        /* ZF caches "zero iff Z set"; VF/NF cache their flag in bit 31. */
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
572
573 /* Sign/zero extend */
574 uint32_t HELPER(sxtb16)(uint32_t x)
575 {
576 uint32_t res;
577 res = (uint16_t)(int8_t)x;
578 res |= (uint32_t)(int8_t)(x >> 16) << 16;
579 return res;
580 }
581
582 uint32_t HELPER(uxtb16)(uint32_t x)
583 {
584 uint32_t res;
585 res = (uint16_t)(uint8_t)x;
586 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
587 return res;
588 }
589
/* CLZ: count leading zero bits of X (thin wrapper around clz32). */
uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}
594
595 int32_t HELPER(sdiv)(int32_t num, int32_t den)
596 {
597 if (den == 0)
598 return 0;
599 if (num == INT_MIN && den == -1)
600 return INT_MIN;
601 return num / den;
602 }
603
604 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
605 {
606 if (den == 0)
607 return 0;
608 return num / den;
609 }
610
611 uint32_t HELPER(rbit)(uint32_t x)
612 {
613 x = ((x & 0xff000000) >> 24)
614 | ((x & 0x00ff0000) >> 8)
615 | ((x & 0x0000ff00) << 8)
616 | ((x & 0x000000ff) << 24);
617 x = ((x & 0xf0f0f0f0) >> 4)
618 | ((x & 0x0f0f0f0f) << 4);
619 x = ((x & 0x88888888) >> 3)
620 | ((x & 0x44444444) >> 1)
621 | ((x & 0x22222222) << 1)
622 | ((x & 0x11111111) << 3);
623 return x;
624 }
625
626 uint32_t HELPER(abs)(uint32_t x)
627 {
628 return ((int32_t)x < 0) ? -x : x;
629 }
630
631 #if defined(CONFIG_USER_ONLY)
632
/* User-mode emulation: exceptions are handled by the caller, so an
 * "interrupt" just clears the pending exception index.
 */
void do_interrupt (CPUARMState *env)
{
    env->exception_index = -1;
}
637
638 int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
639 int mmu_idx)
640 {
641 if (rw == 2) {
642 env->exception_index = EXCP_PREFETCH_ABORT;
643 env->cp15.c6_insn = address;
644 } else {
645 env->exception_index = EXCP_DATA_ABORT;
646 env->cp15.c6_data = address;
647 }
648 return 1;
649 }
650
651 /* These should probably raise undefined insn exceptions. */
652 void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
653 {
654 int op1 = (insn >> 8) & 0xf;
655 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
656 return;
657 }
658
659 uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
660 {
661 int op1 = (insn >> 8) & 0xf;
662 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
663 return 0;
664 }
665
/* User-mode stub: CP15 writes are not valid from user code; abort. */
void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}
670
671 uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
672 {
673 cpu_abort(env, "cp15 insn %08x\n", insn);
674 }
675
676 /* These should probably raise undefined insn exceptions. */
677 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
678 {
679 cpu_abort(env, "v7m_mrs %d\n", reg);
680 }
681
/* User-mode stub for the v7M MRS instruction; aborts. */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}
687
688 void switch_mode(CPUARMState *env, int mode)
689 {
690 if (mode != ARM_CPU_MODE_USR)
691 cpu_abort(env, "Tried to switch out of user mode\n");
692 }
693
/* User-mode stub: there are no banked registers in user emulation. */
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}
698
/* User-mode stub: there are no banked registers in user emulation. */
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}
704
705 #else
706
707 /* Map CPU modes onto saved register banks. */
708 static inline int bank_number(CPUARMState *env, int mode)
709 {
710 switch (mode) {
711 case ARM_CPU_MODE_USR:
712 case ARM_CPU_MODE_SYS:
713 return 0;
714 case ARM_CPU_MODE_SVC:
715 return 1;
716 case ARM_CPU_MODE_ABT:
717 return 2;
718 case ARM_CPU_MODE_UND:
719 return 3;
720 case ARM_CPU_MODE_IRQ:
721 return 4;
722 case ARM_CPU_MODE_FIQ:
723 return 5;
724 }
725 cpu_abort(env, "Bad mode %x\n", mode);
726 return -1;
727 }
728
/* Switch the CPU to MODE: save r13/r14/SPSR into the old mode's bank and
 * load them from the new mode's bank.  FIQ additionally banks r8-r12, so
 * entering or leaving FIQ swaps those five registers with the fiq/usr
 * shadow copies first.
 */
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    /* r8-r12 are only banked for FIQ; swap them on any FIQ transition. */
    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Save the outgoing mode's banked registers... */
    i = bank_number(env, old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    /* ...and restore the incoming mode's. */
    i = bank_number(env, mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
756
757 static void v7m_push(CPUARMState *env, uint32_t val)
758 {
759 env->regs[13] -= 4;
760 stl_phys(env->regs[13], val);
761 }
762
763 static uint32_t v7m_pop(CPUARMState *env)
764 {
765 uint32_t val;
766 val = ldl_phys(env->regs[13]);
767 env->regs[13] += 4;
768 return val;
769 }
770
771 /* Switch to V7M main or process stack pointer. */
772 static void switch_v7m_sp(CPUARMState *env, int process)
773 {
774 uint32_t tmp;
775 if (env->v7m.current_sp != process) {
776 tmp = env->v7m.other_sp;
777 env->v7m.other_sp = env->regs[13];
778 env->regs[13] = tmp;
779 env->v7m.current_sp = process;
780 }
781 }
782
/* Perform a v7M exception return: complete the IRQ at the NVIC, select
 * the stack indicated by the EXC_RETURN value (left in r15 by the
 * exception-return magic address), then unstack r0-r3, r12, lr, pc and
 * xPSR in that order and undo any entry-time stack alignment.
 */
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    /* r15 holds the EXC_RETURN value at this point. */
    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
813
/* Handle an exception on a v7M (Cortex-M) core: either mark it pending
 * on the NVIC, intercept a semihosting BKPT, or perform the actual
 * exception entry (stack the caller-saved frame, switch to the main
 * stack/handler mode, and vector through the NVIC table).
 */
static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    /* Build the EXC_RETURN value: bit 2 = process stack in use,
     * bit 3 = came from Thread mode (no active exception).
     */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            /* BKPT 0xab is the Thumb semihosting call. */
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;   /* Record the realignment in stacked xPSR bit 9. */
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    /* Fetch the handler address from the vector table. */
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
889
890 /* Handle a CPU exception. */
891 void do_interrupt(CPUARMState *env)
892 {
893 uint32_t addr;
894 uint32_t mask;
895 int new_mode;
896 uint32_t offset;
897
898 if (IS_M(env)) {
899 do_interrupt_v7m(env);
900 return;
901 }
902 /* TODO: Vectored interrupt controller. */
903 switch (env->exception_index) {
904 case EXCP_UDEF:
905 new_mode = ARM_CPU_MODE_UND;
906 addr = 0x04;
907 mask = CPSR_I;
908 if (env->thumb)
909 offset = 2;
910 else
911 offset = 4;
912 break;
913 case EXCP_SWI:
914 if (semihosting_enabled) {
915 /* Check for semihosting interrupt. */
916 if (env->thumb) {
917 mask = lduw_code(env->regs[15] - 2) & 0xff;
918 } else {
919 mask = ldl_code(env->regs[15] - 4) & 0xffffff;
920 }
921 /* Only intercept calls from privileged modes, to provide some
922 semblance of security. */
923 if (((mask == 0x123456 && !env->thumb)
924 || (mask == 0xab && env->thumb))
925 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
926 env->regs[0] = do_arm_semihosting(env);
927 return;
928 }
929 }
930 new_mode = ARM_CPU_MODE_SVC;
931 addr = 0x08;
932 mask = CPSR_I;
933 /* The PC already points to the next instruction. */
934 offset = 0;
935 break;
936 case EXCP_BKPT:
937 /* See if this is a semihosting syscall. */
938 if (env->thumb && semihosting_enabled) {
939 mask = lduw_code(env->regs[15]) & 0xff;
940 if (mask == 0xab
941 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
942 env->regs[15] += 2;
943 env->regs[0] = do_arm_semihosting(env);
944 return;
945 }
946 }
947 env->cp15.c5_insn = 2;
948 /* Fall through to prefetch abort. */
949 case EXCP_PREFETCH_ABORT:
950 new_mode = ARM_CPU_MODE_ABT;
951 addr = 0x0c;
952 mask = CPSR_A | CPSR_I;
953 offset = 4;
954 break;
955 case EXCP_DATA_ABORT:
956 new_mode = ARM_CPU_MODE_ABT;
957 addr = 0x10;
958 mask = CPSR_A | CPSR_I;
959 offset = 8;
960 break;
961 case EXCP_IRQ:
962 new_mode = ARM_CPU_MODE_IRQ;
963 addr = 0x18;
964 /* Disable IRQ and imprecise data aborts. */
965 mask = CPSR_A | CPSR_I;
966 offset = 4;
967 break;
968 case EXCP_FIQ:
969 new_mode = ARM_CPU_MODE_FIQ;
970 addr = 0x1c;
971 /* Disable FIQ, IRQ and imprecise data aborts. */
972 mask = CPSR_A | CPSR_I | CPSR_F;
973 offset = 4;
974 break;
975 default:
976 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
977 return; /* Never happens. Keep compiler happy. */
978 }
979 /* High vectors. */
980 if (env->cp15.c1_sys & (1 << 13)) {
981 addr += 0xffff0000;
982 }
983 switch_mode (env, new_mode);
984 env->spsr = cpsr_read(env);
985 /* Clear IT bits. */
986 env->condexec_bits = 0;
987 /* Switch to the new mode, and to the correct instruction set. */
988 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
989 env->uncached_cpsr |= mask;
990 /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
991 * and we should just guard the thumb mode on V4 */
992 if (arm_feature(env, ARM_FEATURE_V4T)) {
993 env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
994 }
995 env->regs[14] = env->regs[15] + offset;
996 env->regs[15] = addr;
997 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
998 }
999
1000 /* Check section/page access permissions.
1001 Returns the page protection flags, or zero if the access is not
1002 permitted. */
1003 static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
1004 int access_type, int is_user)
1005 {
1006 int prot_ro;
1007
1008 if (domain_prot == 3) {
1009 return PAGE_READ | PAGE_WRITE;
1010 }
1011
1012 if (access_type == 1)
1013 prot_ro = 0;
1014 else
1015 prot_ro = PAGE_READ;
1016
1017 switch (ap) {
1018 case 0:
1019 if (access_type == 1)
1020 return 0;
1021 switch ((env->cp15.c1_sys >> 8) & 3) {
1022 case 1:
1023 return is_user ? 0 : PAGE_READ;
1024 case 2:
1025 return PAGE_READ;
1026 default:
1027 return 0;
1028 }
1029 case 1:
1030 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
1031 case 2:
1032 if (is_user)
1033 return prot_ro;
1034 else
1035 return PAGE_READ | PAGE_WRITE;
1036 case 3:
1037 return PAGE_READ | PAGE_WRITE;
1038 case 4: /* Reserved. */
1039 return 0;
1040 case 5:
1041 return is_user ? 0 : prot_ro;
1042 case 6:
1043 return prot_ro;
1044 case 7:
1045 if (!arm_feature (env, ARM_FEATURE_V6K))
1046 return 0;
1047 return prot_ro;
1048 default:
1049 abort();
1050 }
1051 }
1052
1053 static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
1054 {
1055 uint32_t table;
1056
1057 if (address & env->cp15.c2_mask)
1058 table = env->cp15.c2_base1 & 0xffffc000;
1059 else
1060 table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
1061
1062 table |= (address >> 18) & 0x3ffc;
1063 return table;
1064 }
1065
/* Perform an ARMv5-format (pre-VMSAv6) two-level pagetable walk.
 *
 * On success returns 0 and fills in *phys_ptr, *prot and *page_size.
 * On failure returns a fault status value: fault type in the low four
 * bits, faulting domain in bits [7:4].
 */
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    uint32_t phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        /* Domain is "no access" (or the reserved encoding).  */
        if (type == 2)
            code = 9; /* Section domain fault. */
        else
            code = 11; /* Page domain fault. */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            /* Select the per-subpage 2-bit AP field for this quarter.  */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page. */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    /* XScale treats this as an extended 4k page.  */
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault. */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault. */
        goto do_fault;
    }
    /* v5 format has no execute-never bit, so always allow exec.  */
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
1160
/* Perform a VMSAv6/v7 short-descriptor pagetable walk (sections,
 * supersections, 64k and 4k pages, with XN bits and the simplified
 * access permission model).
 *
 * On success returns 0 and fills in *phys_ptr, *prot and *page_size.
 * On failure returns a fault status value: fault type in the low four
 * bits, faulting domain in bits [7:4].
 */
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    int domain_prot;
    uint32_t phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault. */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection. */
        domain = 0;
    } else {
        /* Section or page. */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault. */
        else
            code = 11; /* Page domain fault. */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        /* AP is a 3-bit field: AP[1:0] at bits [11:10], AP[2] at bit 15. */
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        /* Manager domain: no permission checks, full access.  */
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        /* Execute-never pages fault on instruction fetch; the fault
           code set above (section or page permission fault) is used.  */
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit. */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault. */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault. */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
1264
1265 static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
1266 int is_user, uint32_t *phys_ptr, int *prot)
1267 {
1268 int n;
1269 uint32_t mask;
1270 uint32_t base;
1271
1272 *phys_ptr = address;
1273 for (n = 7; n >= 0; n--) {
1274 base = env->cp15.c6_region[n];
1275 if ((base & 1) == 0)
1276 continue;
1277 mask = 1 << ((base >> 1) & 0x1f);
1278 /* Keep this shift separate from the above to avoid an
1279 (undefined) << 32. */
1280 mask = (mask << 1) - 1;
1281 if (((base ^ address) & ~mask) == 0)
1282 break;
1283 }
1284 if (n < 0)
1285 return 2;
1286
1287 if (access_type == 2) {
1288 mask = env->cp15.c5_insn;
1289 } else {
1290 mask = env->cp15.c5_data;
1291 }
1292 mask = (mask >> (n * 4)) & 0xf;
1293 switch (mask) {
1294 case 0:
1295 return 1;
1296 case 1:
1297 if (is_user)
1298 return 1;
1299 *prot = PAGE_READ | PAGE_WRITE;
1300 break;
1301 case 2:
1302 *prot = PAGE_READ;
1303 if (!is_user)
1304 *prot |= PAGE_WRITE;
1305 break;
1306 case 3:
1307 *prot = PAGE_READ | PAGE_WRITE;
1308 break;
1309 case 5:
1310 if (is_user)
1311 return 1;
1312 *prot = PAGE_READ;
1313 break;
1314 case 6:
1315 *prot = PAGE_READ;
1316 break;
1317 default:
1318 /* Bad permission. */
1319 return 1;
1320 }
1321 *prot |= PAGE_EXEC;
1322 return 0;
1323 }
1324
1325 static inline int get_phys_addr(CPUARMState *env, uint32_t address,
1326 int access_type, int is_user,
1327 uint32_t *phys_ptr, int *prot,
1328 target_ulong *page_size)
1329 {
1330 /* Fast Context Switch Extension. */
1331 if (address < 0x02000000)
1332 address += env->cp15.c13_fcse;
1333
1334 if ((env->cp15.c1_sys & 1) == 0) {
1335 /* MMU/MPU disabled. */
1336 *phys_ptr = address;
1337 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1338 *page_size = TARGET_PAGE_SIZE;
1339 return 0;
1340 } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1341 *page_size = TARGET_PAGE_SIZE;
1342 return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1343 prot);
1344 } else if (env->cp15.c1_sys & (1 << 23)) {
1345 return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1346 prot, page_size);
1347 } else {
1348 return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1349 prot, page_size);
1350 }
1351 }
1352
/* Handle a softmmu TLB miss: translate @address and either install the
 * mapping in the qemu TLB (returning 0), or record the fault in the
 * cp15 fault status/address registers and set the exception to raise
 * (returning 1).  access_type is 0 for reads, 1 for writes and 2 for
 * instruction fetches.
 */
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    uint32_t phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        /* 1k alignment: matches the smallest page size the v5 walk can
           produce, so subpages get distinct TLB entries.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        /* Instruction fetch fault: report via IFSR/IFAR.  */
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        /* Data fault: report via DFSR/DFAR.  */
        env->cp15.c5_data = ret;
        /* On v6 and later, FSR bit 11 distinguishes writes from reads.  */
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
1385
1386 target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
1387 {
1388 uint32_t phys_addr;
1389 target_ulong page_size;
1390 int prot;
1391 int ret;
1392
1393 ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1394
1395 if (ret != 0)
1396 return -1;
1397
1398 return phys_addr;
1399 }
1400
1401 void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
1402 {
1403 int cp_num = (insn >> 8) & 0xf;
1404 int cp_info = (insn >> 5) & 7;
1405 int src = (insn >> 16) & 0xf;
1406 int operand = insn & 0xf;
1407
1408 if (env->cp[cp_num].cp_write)
1409 env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1410 cp_info, src, operand, val);
1411 }
1412
1413 uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
1414 {
1415 int cp_num = (insn >> 8) & 0xf;
1416 int cp_info = (insn >> 5) & 7;
1417 int dest = (insn >> 16) & 0xf;
1418 int operand = insn & 0xf;
1419
1420 if (env->cp[cp_num].cp_read)
1421 return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1422 cp_info, dest, operand);
1423 return 0;
1424 }
1425
/* Convert MPU access permissions from the extended format (four bits
   per region) to the basic format (two bits per region), keeping the
   low two bits of each field.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t result = 0;
    int i;

    for (i = 0; i < 16; i += 2) {
        /* Move bits [2i+1:2i] of val down to bits [i+1:i].  */
        result |= (val >> i) & (3u << i);
    }
    return result;
}
1440
/* Pad basic MPU access permission bits (two bits per region) to the
   extended format (four bits per region, permissions in the low two
   bits of each field).  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t result = 0;
    int i;

    for (i = 0; i < 16; i += 2) {
        /* Move bits [i+1:i] of val up to bits [2i+1:2i].  */
        result |= (val & (3u << i)) << i;
    }
    return result;
}
1455
/* Emulate an MCR to coprocessor 15: decode the register from @insn
 * (CRn in bits [19:16], op1 in [23:21], op2 in [7:5], CRm in [3:0])
 * and write @val to the emulated register, performing any required
 * side effects (TLB/TB flushes).  Unimplemented registers go to the
 * bad_reg label, which currently aborts.
 */
void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        /* ID codes. */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 2 && crm == 0 && op2 == 0) {
            /* Cache size selection register.  */
            env->cp15.c0_cssel = val & 0xf;
            break;
        }
        goto bad_reg;
    case 1: /* System configuration. */
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 0 && crm == 1 && op2 == 0) {
            env->cp15.c1_scr = val;
            break;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented. */
            /* This may enable/disable the MMU, so do a TLB flush. */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register. */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented. */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            if (env->cp15.c1_coproc != val) {
                env->cp15.c1_coproc = val;
                /* ??? Is this safe when called from within a TB? */
                tb_flush(env);
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control. */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                /* TTBCR: recompute the cached TTBR0/TTBR1 split masks.  */
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control. */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved. */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission. */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            /* MPU writes use the basic format; widen to extended.  */
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size. */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control. */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        if (op1 != 0) {
            goto bad_reg;
        }
        /* No cache, so nothing to do except VA->PA translations. */
        if (arm_feature(env, ARM_FEATURE_VAPA)) {
            switch (crm) {
            case 4:
                /* PAR: direct write, masking the reserved bits.  */
                if (arm_feature(env, ARM_FEATURE_V7)) {
                    env->cp15.c7_par = val & 0xfffff6ff;
                } else {
                    env->cp15.c7_par = val & 0xfffff1ff;
                }
                break;
            case 8: {
                /* VA-to-PA translation operations: walk the tables and
                 * deposit the result (or fault status) into the PAR.
                 */
                uint32_t phys_addr;
                target_ulong page_size;
                int prot;
                int ret, is_user = op2 & 2;
                int access_type = op2 & 1;

                if (op2 & 4) {
                    /* Other states are only available with TrustZone */
                    goto bad_reg;
                }
                ret = get_phys_addr(env, val, access_type, is_user,
                                    &phys_addr, &prot, &page_size);
                if (ret == 0) {
                    /* We do not set any attribute bits in the PAR */
                    if (page_size == (1 << 24)
                        && arm_feature(env, ARM_FEATURE_V7)) {
                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
                    } else {
                        env->cp15.c7_par = phys_addr & 0xfffff000;
                    }
                } else {
                    /* Repack the fault code into the PAR failure format.  */
                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
                                       ((ret & (12 << 1)) >> 6) |
                                       ((ret & 0xf) << 1) | 1;
                }
                break;
            }
            }
        }
        break;
    case 8: /* MMU TLB control. */
        switch (op2) {
        case 0: /* Invalidate all (TLBIALL) */
            tlb_flush(env, 1);
            break;
        case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
            break;
        case 2: /* Invalidate by ASID (TLBIASID) */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
            tlb_flush_page(env, val & TARGET_PAGE_MASK);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_STRONGARM))
            break; /* Ignore ReadBuffer access */
        switch (crm) {
        case 0: /* Cache lockdown. */
            switch (op1) {
            case 0: /* L1 cache. */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache. */
                /* Ignore writes to L2 lockdown/auxiliary registers. */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers. */
            /* Not implemented. */
            goto bad_reg;
        case 12: /* Performance monitor control */
            /* Performance monitors are implementation defined in v7,
             * but with an ARM recommended set of registers, which we
             * follow (although we don't actually implement any counters)
             */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* performance monitor control register */
                /* only the DP, X, D and E bits are writable */
                env->cp15.c9_pmcr &= ~0x39;
                env->cp15.c9_pmcr |= (val & 0x39);
                break;
            case 1: /* Count enable set register */
                val &= (1 << 31);
                env->cp15.c9_pmcnten |= val;
                break;
            case 2: /* Count enable clear */
                val &= (1 << 31);
                env->cp15.c9_pmcnten &= ~val;
                break;
            case 3: /* Overflow flag status */
                env->cp15.c9_pmovsr &= ~val;
                break;
            case 4: /* Software increment */
                /* RAZ/WI since we don't implement the software-count event */
                break;
            case 5: /* Event counter selection register */
                /* Since we don't implement any events, writing to this register
                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
                 */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 13: /* Performance counters */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* Cycle count register: not implemented, so RAZ/WI */
                break;
            case 1: /* Event type select */
                env->cp15.c9_pmxevtyper = val & 0xff;
                break;
            case 2: /* Event count register */
                /* Unimplemented (we have no events), RAZ/WI */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 14: /* Performance monitor control */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* user enable */
                env->cp15.c9_pmuserenr = val & 1;
                /* changes access rights for cp registers, so flush tbs */
                tb_flush(env);
                break;
            case 1: /* interrupt enable set */
                /* We have no event counters so only the C bit can be changed */
                val &= (1 << 31);
                env->cp15.c9_pminten |= val;
                break;
            case 2: /* interrupt enable clear */
                val &= (1 << 31);
                env->cp15.c9_pminten &= ~val;
                break;
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown. */
        /* ??? TLB lockdown not implemented. */
        break;
    case 12: /* Reserved. */
        goto bad_reg;
    case 13: /* Process ID. */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
                tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush. */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
                tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Generic timer */
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
            /* Dummy implementation: RAZ/WI for all */
            break;
        }
        goto bad_reg;
    case 15: /* Implementation specific. */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush. */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration. */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max. */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min. */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID. */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated). */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
            switch (crm) {
            case 0:
                /* Cortex-A9 power/diagnostic control registers.  */
                if ((op1 == 0) && (op2 == 0)) {
                    env->cp15.c15_power_control = val;
                } else if ((op1 == 0) && (op2 == 1)) {
                    env->cp15.c15_diagnostic = val;
                } else if ((op1 == 0) && (op2 == 2)) {
                    env->cp15.c15_power_diagnostic = val;
                }
            default:
                break;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only. Should raise illegal instruction exception. */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}
1865
/* Emulate an MRC from coprocessor 15: decode the register from @insn
 * (CRn in bits [19:16], op1 in [23:21], op2 in [7:5], CRm in [3:0])
 * and return the emulated register value.  Unimplemented registers go
 * to the bad_reg label, which currently aborts.
 */
uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes. */
        switch (op1) {
        case 0:
            switch (crm) {
            case 0:
                switch (op2) {
                case 0: /* Device ID. */
                    return env->cp15.c0_cpuid;
                case 1: /* Cache Type. */
                    return env->cp15.c0_cachetype;
                case 2: /* TCM status. */
                    return 0;
                case 3: /* TLB type register. */
                    return 0; /* No lockable TLB entries. */
                case 5: /* MPIDR */
                    /* The MPIDR was standardised in v7; prior to
                     * this it was implemented only in the 11MPCore.
                     * For all other pre-v7 cores it does not exist.
                     */
                    if (arm_feature(env, ARM_FEATURE_V7) ||
                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
                        int mpidr = env->cpu_index;
                        /* We don't support setting cluster ID ([8..11])
                         * so these bits always RAZ.
                         */
                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
                            mpidr |= (1 << 31);
                            /* Cores which are uniprocessor (non-coherent)
                             * but still implement the MP extensions set
                             * bit 30. (For instance, A9UP.) However we do
                             * not currently model any of those cores.
                             */
                        }
                        return mpidr;
                    }
                    /* otherwise fall through to the unimplemented-reg case */
                default:
                    goto bad_reg;
                }
            case 1:
                /* ID_PFRn etc: feature registers, standardised in v6.  */
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c1[op2];
            case 2:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c2[op2];
            case 3: case 4: case 5: case 6: case 7:
                return 0;
            default:
                goto bad_reg;
            }
        case 1:
            /* These registers aren't documented on arm11 cores.  However
               Linux looks at them anyway.  */
            if (!arm_feature(env, ARM_FEATURE_V6))
                goto bad_reg;
            if (crm != 0)
                goto bad_reg;
            if (!arm_feature(env, ARM_FEATURE_V7))
                return 0;

            switch (op2) {
            case 0:
                /* CCSIDR: indexed by the current CSSELR value.  */
                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
            case 1:
                return env->cp15.c0_clid;
            case 7:
                return 0;
            }
            goto bad_reg;
        case 2:
            if (op2 != 0 || crm != 0)
                goto bad_reg;
            return env->cp15.c0_cssel;
        default:
            goto bad_reg;
        }
    case 1: /* System configuration. */
        if (arm_feature(env, ARM_FEATURE_V7)
            && op1 == 0 && crm == 1 && op2 == 0) {
            return env->cp15.c1_scr;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0: /* Control register. */
            return env->cp15.c1_sys;
        case 1: /* Auxiliary control register. */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                return env->cp15.c1_xscaleauxcr;
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
                goto bad_reg;
            /* ACTLR is implementation defined; return the per-core
             * reset value.
             */
            switch (ARM_CPUID(env)) {
            case ARM_CPUID_ARM1026:
                return 1;
            case ARM_CPUID_ARM1136:
            case ARM_CPUID_ARM1136_R2:
            case ARM_CPUID_ARM1176:
                return 7;
            case ARM_CPUID_ARM11MPCORE:
                return 1;
            case ARM_CPUID_CORTEXA8:
                return 2;
            case ARM_CPUID_CORTEXA9:
            case ARM_CPUID_CORTEXA15:
                return 0;
            default:
                goto bad_reg;
            }
        case 2: /* Coprocessor access register. */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            return env->cp15.c1_coproc;
        default:
            goto bad_reg;
        }
    case 2: /* MMU Page table control / MPU cache control. */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                return env->cp15.c2_data;
                break;
            case 1:
                return env->cp15.c2_insn;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                return env->cp15.c2_base0;
            case 1:
                return env->cp15.c2_base1;
            case 2:
                return env->cp15.c2_control;
            default:
                goto bad_reg;
            }
        }
    case 3: /* MMU Domain access control / MPU write buffer control. */
        return env->cp15.c3;
    case 4: /* Reserved. */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission. */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            /* MPU reads use the basic format; narrow from extended.  */
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_data);
            return env->cp15.c5_data;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_insn);
            return env->cp15.c5_insn;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_data;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_insn;
        default:
            goto bad_reg;
        }
    case 6: /* MMU Fault address. */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            return env->cp15.c6_region[crm];
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                return env->cp15.c6_data;
            case 1:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Watchpoint Fault Adrress. */
                    return 0; /* Not implemented. */
                } else {
                    /* Instruction Fault Adrress. */
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
                       shouldn't do any harm. */
                    return env->cp15.c6_insn;
                }
            case 2:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Instruction Fault Adrress. */
                    return env->cp15.c6_insn;
                } else {
                    goto bad_reg;
                }
            default:
                goto bad_reg;
            }
        }
    case 7: /* Cache control. */
        if (crm == 4 && op1 == 0 && op2 == 0) {
            return env->cp15.c7_par;
        }
        /* FIXME: Should only clear Z flag if destination is r15. */
        env->ZF = 0;
        return 0;
    case 8: /* MMU TLB control. */
        goto bad_reg;
    case 9:
        switch (crm) {
        case 0: /* Cache lockdown */
            switch (op1) {
            case 0: /* L1 cache. */
                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
                    return 0;
                }
                switch (op2) {
                case 0:
                    return env->cp15.c9_data;
                case 1:
                    return env->cp15.c9_insn;
                default:
                    goto bad_reg;
                }
            case 1: /* L2 cache */
                /* L2 Lockdown and Auxiliary control. */
                switch (op2) {
                case 0:
                    /* L2 cache lockdown (A8 only) */
                    return 0;
                case 2:
                    /* L2 cache auxiliary control (A8) or control (A15) */
                    if (ARM_CPUID(env) == ARM_CPUID_CORTEXA15) {
                        /* Linux wants the number of processors from here.
                         * Might as well set the interrupt-controller bit too.
                         */
                        return ((smp_cpus - 1) << 24) | (1 << 23);
                    }
                    return 0;
                case 3:
                    /* L2 cache extended control (A15) */
                    return 0;
                default:
                    goto bad_reg;
                }
            default:
                goto bad_reg;
            }
            break;
        case 12: /* Performance monitor control */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* performance monitor control register */
                return env->cp15.c9_pmcr;
            case 1: /* count enable set */
            case 2: /* count enable clear */
                return env->cp15.c9_pmcnten;
            case 3: /* overflow flag status */
                return env->cp15.c9_pmovsr;
            case 4: /* software increment */
            case 5: /* event counter selection register */
                return 0; /* Unimplemented, RAZ/WI */
            default:
                goto bad_reg;
            }
        case 13: /* Performance counters */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 1: /* Event type select */
                return env->cp15.c9_pmxevtyper;
            case 0: /* Cycle count register */
            case 2: /* Event count register */
                /* Unimplemented, so RAZ/WI */
                return 0;
            default:
                goto bad_reg;
            }
        case 14: /* Performance monitor control */
            if (!arm_feature(env, ARM_FEATURE_V7)) {
                goto bad_reg;
            }
            switch (op2) {
            case 0: /* user enable */
                return env->cp15.c9_pmuserenr;
            case 1: /* interrupt enable set */
            case 2: /* interrupt enable clear */
                return env->cp15.c9_pminten;
            default:
                goto bad_reg;
            }
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown. */
        /* ??? TLB lockdown not implemented. */
        return 0;
    case 11: /* TCM DMA control. */
    case 12: /* Reserved. */
        goto bad_reg;
    case 13: /* Process ID. */
        switch (op2) {
        case 0:
            return env->cp15.c13_fcse;
        case 1:
            return env->cp15.c13_context;
        default:
            goto bad_reg;
        }
    case 14: /* Generic timer */
        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
            /* Dummy implementation: RAZ/WI for all */
            return 0;
        }
        goto bad_reg;
    case 15: /* Implementation specific. */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1)
                return env->cp15.c15_cpar;

            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                return 0;
            case 1: /* Read TI925T configuration. */
                return env->cp15.c15_ticonfig;
            case 2: /* Read I_max. */
                return env->cp15.c15_i_max;
            case 3: /* Read I_min. */
                return env->cp15.c15_i_min;
            case 4: /* Read thread-ID. */
                return env->cp15.c15_threadid;
            case 8: /* TI925T_status */
                return 0;
            }
            /* TODO: Peripheral port remap register:
             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
             * controller base address at $rn & ~0xfff and map size of
             * 0x200 << ($rn & 0xfff), when MMU is off. */
            goto bad_reg;
        }
        if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) {
            switch (crm) {
            case 0:
                if ((op1 == 4) && (op2 == 0)) {
                    /* The config_base_address should hold the value of
                     * the peripheral base. ARM should get this from a CPU
                     * object property, but that support isn't available in
                     * December 2011. Default to 0 for now and board models
                     * that care can set it by a private hook */
                    return env->cp15.c15_config_base_address;
                } else if ((op1 == 0) && (op2 == 0)) {
                    /* power_control should be set to maximum latency. Again,
                       default to 0 and set by private hook */
                    return env->cp15.c15_power_control;
                } else if ((op1 == 0) && (op2 == 1)) {
                    return env->cp15.c15_diagnostic;
                } else if ((op1 == 0) && (op2 == 2)) {
                    return env->cp15.c15_power_diagnostic;
                }
                break;
            case 1: /* NEON Busy */
                return 0;
            case 5: /* tlb lockdown */
            case 6:
            case 7:
                if ((op1 == 5) && (op2 == 2)) {
                    return 0;
                }
                break;
            default:
                break;
            }
            goto bad_reg;
        }
        return 0;
    }
bad_reg:
    /* ??? For debugging only. Should raise illegal instruction exception. */
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
    return 0;
}
2266
2267 void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
2268 {
2269 if ((env->uncached_cpsr & CPSR_M) == mode) {
2270 env->regs[13] = val;
2271 } else {
2272 env->banked_r13[bank_number(env, mode)] = val;
2273 }
2274 }
2275
2276 uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
2277 {
2278 if ((env->uncached_cpsr & CPSR_M) == mode) {
2279 return env->regs[13];
2280 } else {
2281 return env->banked_r13[bank_number(env, mode)];
2282 }
2283 }
2284
/* Read a v7M special register (MRS).  REG is the SYSm encoding from the
 * instruction.  Unknown encodings abort (debug aid only).
 */
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        /* current_sp nonzero means the process SP is live in regs[13],
         * so the main SP is parked in other_sp, and vice versa. */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
2321
/* Write a v7M special register (MSR).  REG is the SYSm encoding.
 * Writes to read-only bits (e.g. IPSR) are ignored; unknown encodings
 * abort (debug aid only).
 */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        /* Same mask as APSR: the IPSR half is read-only. */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        /* The SP not currently live in regs[13] is held in other_sp. */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        /* Only accept the write if it raises the masking level:
         * nonzero and lower (stricter) than the current BASEPRI,
         * or BASEPRI currently disabled (zero). */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 20: /* CONTROL */
        /* Bit 1 selects the active stack pointer; switch if it changed. */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
2388
/* Install board/device-specific read/write callbacks for coprocessor
 * CPNUM (valid range 0..14; cp15 is handled by the built-in code and
 * cannot be overridden).  OPAQUE is passed back to the callbacks.
 */
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
                       ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
                       void *opaque)
{
    if (cpnum < 0 || cpnum > 14) {
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
        return;
    }

    env->cp[cpnum].cp_read = cp_read;
    env->cp[cpnum].cp_write = cp_write;
    env->cp[cpnum].opaque = opaque;
}
2402
2403 #endif
2404
2405 /* Note that signed overflow is undefined in C. The following routines are
2406 careful to use unsigned types where modulo arithmetic is required.
2407 Failure to do so _will_ break on newer gcc. */
2408
2409 /* Signed saturating arithmetic. */
2410
2411 /* Perform 16-bit signed saturating addition. */
/* Perform 16-bit signed saturating addition. */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* Signed overflow happened iff the operands agree in sign but the
     * result does not: clamp to INT16_MIN / INT16_MAX. */
    if (!((a ^ b) & 0x8000) && ((sum ^ a) & 0x8000)) {
        sum = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return sum;
}
2425
2426 /* Perform 8-bit signed saturating addition. */
/* Perform 8-bit signed saturating addition. */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    /* Overflow iff operands share a sign the result lacks. */
    if (!((a ^ b) & 0x80) && ((sum ^ a) & 0x80)) {
        sum = (a & 0x80) ? 0x80 : 0x7f;
    }
    return sum;
}
2440
2441 /* Perform 16-bit signed saturating subtraction. */
/* Perform 16-bit signed saturating subtraction. */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t diff = a - b;

    /* Overflow iff the operands differ in sign and the result's sign
     * differs from the minuend's: clamp toward the minuend's sign. */
    if (((a ^ b) & 0x8000) && ((diff ^ a) & 0x8000)) {
        diff = (a & 0x8000) ? 0x8000 : 0x7fff;
    }
    return diff;
}
2455
2456 /* Perform 8-bit signed saturating subtraction. */
/* Perform 8-bit signed saturating subtraction. */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t diff = a - b;

    /* Overflow iff operand signs differ and the result flipped away
     * from the minuend's sign. */
    if (((a ^ b) & 0x80) && ((diff ^ a) & 0x80)) {
        diff = (a & 0x80) ? 0x80 : 0x7f;
    }
    return diff;
}
2470
/* Instantiate the ARMv6 parallel signed saturating helpers ("q" prefix:
 * qadd16, qsub8, etc.) by plugging the saturating primitives above into
 * the generator in op_addsub.h. */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
2478
2479 /* Unsigned saturating arithmetic. */
/* Unsigned saturating arithmetic. */
/* 16-bit unsigned saturating addition: clamp to 0xffff on wraparound. */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t sum = a + b;

    /* A result smaller than an operand means the 16-bit add wrapped. */
    return (sum < a) ? 0xffff : sum;
}
2488
/* 16-bit unsigned saturating subtraction: floor the result at zero. */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return (a > b) ? (uint16_t)(a - b) : 0;
}
2496
/* 8-bit unsigned saturating addition: clamp to 0xff on wraparound. */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t sum = a + b;

    return (sum < a) ? 0xff : sum;
}
2505
/* 8-bit unsigned saturating subtraction: floor the result at zero. */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return (a > b) ? (uint8_t)(a - b) : 0;
}
2513
/* Instantiate the ARMv6 parallel unsigned saturating helpers
 * ("uq" prefix: uqadd16, uqsub8, etc.). */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
2521
2522 /* Signed modulo arithmetic. */
/* Per-lane signed wrapping add/sub.  In addition to depositing the lane
 * result via RESULT(), these set the GE flag bits for the lane when the
 * full-width signed result is >= 0 (used by the SEL instruction). */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


/* Instantiate the "s"-prefixed helpers (sadd16, ssub8, ...), which
 * also produce GE flags (ARITH_GE). */
#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n) SARITH8(a, b, n, +)
#define SUB8(a, b, n) SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
2548
2549 /* Unsigned modulo arithmetic. */
/* Per-lane unsigned wrapping add/sub for the "u"-prefixed helpers
 * (uadd16, usub8, ...).  GE is set per lane on carry-out for addition
 * (bit 16/8 of the widened sum) and on no-borrow for subtraction. */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
2586
2587 /* Halved signed arithmetic. */
/* Instantiate the halving signed helpers ("sh" prefix: shadd16, ...):
 * widen to int32, add/sub, then arithmetic-shift right by one. */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
2599
2600 /* Halved unsigned arithmetic. */
/* Instantiate the halving unsigned helpers ("uh" prefix: uhadd16, ...):
 * widen to uint32, add/sub, then logical-shift right by one. */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
2612
/* Absolute difference of two unsigned bytes. */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    return (a < b) ? (uint8_t)(b - a) : (uint8_t)(a - b);
}
2620
2621 /* Unsigned sum of absolute byte differences. */
2622 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2623 {
2624 uint32_t sum;
2625 sum = do_usad(a, b);
2626 sum += do_usad(a >> 8, b >> 8);
2627 sum += do_usad(a >> 16, b >>16);
2628 sum += do_usad(a >> 24, b >> 24);
2629 return sum;
2630 }
2631
2632 /* For ARMv6 SEL instruction. */
2633 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2634 {
2635 uint32_t mask;
2636
2637 mask = 0;
2638 if (flags & 1)
2639 mask |= 0xff;
2640 if (flags & 2)
2641 mask |= 0xff00;
2642 if (flags & 4)
2643 mask |= 0xff0000;
2644 if (flags & 8)
2645 mask |= 0xff000000;
2646 return (a & mask) | (b & ~mask);
2647 }
2648
2649 uint32_t HELPER(logicq_cc)(uint64_t val)
2650 {
2651 return (val >> 32) | (val != 0);
2652 }
2653
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix. */
2657
2658 /* Convert host exception flags to vfp form. */
2659 static inline int vfp_exceptbits_from_host(int host_bits)
2660 {
2661 int target_bits = 0;
2662
2663 if (host_bits & float_flag_invalid)
2664 target_bits |= 1;
2665 if (host_bits & float_flag_divbyzero)
2666 target_bits |= 2;
2667 if (host_bits & float_flag_overflow)
2668 target_bits |= 4;
2669 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2670 target_bits |= 8;
2671 if (host_bits & float_flag_inexact)
2672 target_bits |= 0x10;
2673 if (host_bits & float_flag_input_denormal)
2674 target_bits |= 0x80;
2675 return target_bits;
2676 }
2677
2678 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
2679 {
2680 int i;
2681 uint32_t fpscr;
2682
2683 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2684 | (env->vfp.vec_len << 16)
2685 | (env->vfp.vec_stride << 20);
2686 i = get_float_exception_flags(&env->vfp.fp_status);
2687 i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2688 fpscr |= vfp_exceptbits_from_host(i);
2689 return fpscr;
2690 }
2691
/* Externally linkable wrapper around the FPSCR read helper. */
uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
2696
2697 /* Convert vfp exception flags to target form. */
2698 static inline int vfp_exceptbits_to_host(int target_bits)
2699 {
2700 int host_bits = 0;
2701
2702 if (target_bits & 1)
2703 host_bits |= float_flag_invalid;
2704 if (target_bits & 2)
2705 host_bits |= float_flag_divbyzero;
2706 if (target_bits & 4)
2707 host_bits |= float_flag_overflow;
2708 if (target_bits & 8)
2709 host_bits |= float_flag_underflow;
2710 if (target_bits & 0x10)
2711 host_bits |= float_flag_inexact;
2712 if (target_bits & 0x80)
2713 host_bits |= float_flag_input_denormal;
2714 return host_bits;
2715 }
2716
/* Write VAL to the FPSCR: store the register (minus LEN/STRIDE, which
 * are cached unpacked), and push rounding mode, flush-to-zero,
 * default-NaN and exception-flag state down into the softfloat status.
 */
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /* Only re-program softfloat for bits that actually changed. */
    changed ^= val;
    if (changed & (3 << 22)) {
        /* FPSCR.RMode (bits 23:22) -> softfloat rounding mode. */
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        /* FPSCR.FZ: flush denormals on both outputs and inputs. */
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    /* FPSCR.DN: default-NaN mode. */
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    /* Replace the accumulated exception flags with those from VAL; the
     * standard status flags were already folded in by the read helper,
     * so clear them here. */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
2757
/* Externally linkable wrapper around the FPSCR write helper. */
void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
2762
/* Build a VFP helper name: VFP_HELPER(add, s) -> helper_vfp_adds. */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate single- and double-precision binary-op helpers.  They take
 * an explicit float_status pointer so the translator can select either
 * fp_status or standard_fp_status. */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
2781
/* Negation: pure sign-bit flip, no float_status needed. */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

/* Absolute value: pure sign-bit clear, no float_status needed. */
float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

/* Square root uses the FPSCR-controlled status (rounding, exceptions). */
float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
2811
/* XXX: check quiet/signaling case */
/* Generate the VFP compare helpers.  The comparison outcome is encoded
 * into FPSCR NZCV (bits 31:28): equal -> 0x6, less -> 0x8, greater ->
 * 0x2, unordered -> 0x3.  "cmp" does a quiet compare; "cmpe" uses the
 * signaling compare variant. */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
2841
/* Integer to float and float to integer conversions */

/* Generate an int32/uint32 -> float conversion helper. */
#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

/* Generate a float -> int32/uint32 conversion helper.  A NaN input
 * raises invalid and converts to 0. */
#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

/* For each (name, precision) pair emit: int->float, float->int using
 * the current rounding mode, and float->int round-to-zero ("z"). */
#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
2875
2876 /* floating point conversion */
2877 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
2878 {
2879 float64 r = float32_to_float64(x, &env->vfp.fp_status);
2880 /* ARM requires that S<->D conversion of any kind of NaN generates
2881 * a quiet NaN by forcing the most significant frac bit to 1.
2882 */
2883 return float64_maybe_silence_nan(r);
2884 }
2885
2886 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
2887 {
2888 float32 r = float64_to_float32(x, &env->vfp.fp_status);
2889 /* ARM requires that S<->D conversion of any kind of NaN generates
2890 * a quiet NaN by forcing the most significant frac bit to 1.
2891 */
2892 return float32_maybe_silence_nan(r);
2893 }
2894
/* VFP3 fixed point conversion. */
/* Generate fixed-point <-> float helpers.  SHIFT is the number of
 * fraction bits: int->float converts then scales by 2^-shift;
 * float->int scales by 2^shift and truncates toward zero.  NaN inputs
 * raise invalid and convert to 0. */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
                                    void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
} \
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
                                       void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
}

VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
2927
2928 /* Half precision conversions. */
2929 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
2930 {
2931 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2932 float32 r = float16_to_float32(make_float16(a), ieee, s);
2933 if (ieee) {
2934 return float32_maybe_silence_nan(r);
2935 }
2936 return r;
2937 }
2938
2939 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
2940 {
2941 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2942 float16 r = float32_to_float16(a, ieee, s);
2943 if (ieee) {
2944 r = float16_maybe_silence_nan(r);
2945 }
2946 return float16_val(r);
2947 }
2948
/* NEON half<->single conversions use the standard (fixed) FP status. */
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

/* VFP half<->single conversions use the FPSCR-controlled FP status. */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
2968
/* float32 bit patterns for the VRECPS/VRSQRTS step constants. */
#define float32_two make_float32(0x40000000)            /* 2.0f */
#define float32_three make_float32(0x40400000)          /* 3.0f */
#define float32_one_point_five make_float32(0x3fc00000) /* 1.5f */
2972
2973 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
2974 {
2975 float_status *s = &env->vfp.standard_fp_status;
2976 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2977 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2978 if (!(float32_is_zero(a) || float32_is_zero(b))) {
2979 float_raise(float_flag_input_denormal, s);
2980 }
2981 return float32_two;
2982 }
2983 return float32_sub(float32_two, float32_mul(a, b, s), s);
2984 }
2985
2986 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
2987 {
2988 float_status *s = &env->vfp.standard_fp_status;
2989 float32 product;
2990 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2991 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2992 if (!(float32_is_zero(a) || float32_is_zero(b))) {
2993 float_raise(float_flag_input_denormal, s);
2994 }
2995 return float32_one_point_five;
2996 }
2997 product = float32_mul(a, b, s);
2998 return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2999 }
3000
3001 /* NEON helpers. */
3002
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time. */
#define float64_256 make_float64(0x4070000000000000LL) /* 256.0 */
#define float64_512 make_float64(0x4080000000000000LL) /* 512.0 */
3007
3008 /* The algorithm that must be used to calculate the estimate
3009 * is specified by the ARM ARM.
3010 */
/* Reciprocal estimate of A.  Callers normalize A to 0.5 <= a < 1.0
 * (they construct it with a fixed 0x3fe exponent), so the result is a
 * fixed-point-style estimate with 8 fraction bits of precision. */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
       so we use a local copy of the fp_status.  */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
3036
/* NEON VRECPE: floating-point reciprocal estimate.
 * Special cases: NaN -> default NaN (invalid raised only for sNaN),
 * infinity -> signed zero, zero/denormal -> signed infinity with
 * divide-by-zero raised, and inputs too large to have a normal
 * reciprocal (exponent >= 253) underflow to signed zero.
 */
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    /* Rescale the fraction into a double in [0.5, 1.0) by attaching a
     * fixed 0x3fe exponent. */
    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    /* Repack: original sign, computed exponent, estimate's fraction. */
    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}
3077
3078 /* The algorithm that must be used to calculate the estimate
3079 * is specified by the ARM ARM.
3080 */
3081 static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
3082 {
3083 /* These calculations mustn't set any fp exception flags,
3084 * so we use a local copy of the fp_status.
3085 */
3086 float_status dummy_status = env->vfp.standard_fp_status;
3087 float_status *s = &dummy_status;
3088 float64 q;
3089 int64_t q_int;
3090
3091 if (float64_lt(a, float64_half, s)) {
3092 /* range 0.25 <= a < 0.5 */
3093
3094 /* a in units of 1/512 rounded down */
3095 /* q0 = (int)(a * 512.0); */
3096 q = float64_mul(float64_512, a, s);
3097 q_int = float64_to_int64_round_to_zero(q, s);
3098
3099 /* reciprocal root r */
3100 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
3101 q = int64_to_float64(q_int, s);
3102 q = float64_add(q, float64_half, s);
3103 q = float64_div(q, float64_512, s);
3104 q = float64_sqrt(q, s);
3105 q = float64_div(float64_one, q, s);
3106 } else {
3107 /* range 0.5 <= a < 1.0 */
3108
3109 /* a in units of 1/256 rounded down */
3110 /* q1 = (int)(a * 256.0); */
3111 q = float64_mul(float64_256, a, s);
3112 int64_t q_int = float64_to_int64_round_to_zero(q, s);
3113
3114 /* reciprocal root r */
3115 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
3116 q = int64_to_float64(q_int, s);
3117 q = float64_add(q, float64_half, s);
3118 q = float64_div(q, float64_256, s);
3119 q = float64_sqrt(q, s);
3120 q = float64_div(float64_one, q, s);
3121 }
3122 /* r in units of 1/256 rounded to nearest */
3123 /* s = (int)(256.0 * r + 0.5); */
3124
3125 q = float64_mul(q, float64_256,s );
3126 q = float64_add(q, float64_half, s);
3127 q_int = float64_to_int64_round_to_zero(q, s);
3128
3129 /* return (double)s / 256.0;*/
3130 return float64_div(int64_to_float64(q_int, s), float64_256, s);
3131 }
3132
/* NEON VRSQRTE: floating-point reciprocal square-root estimate.
 * Special cases: NaN -> default NaN (invalid raised only for sNaN),
 * zero/denormal -> signed infinity with divide-by-zero raised,
 * negative -> invalid + default NaN, +infinity -> +0.
 */
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent (bit 0 of the exponent is
     * 0x800000 in the float32 encoding). */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    /* Repack exponent and fraction; the result is always positive. */
    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
3183
3184 uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
3185 {
3186 float64 f64;
3187
3188 if ((a & 0x80000000) == 0) {
3189 return 0xffffffff;
3190 }
3191
3192 f64 = make_float64((0x3feULL << 52)
3193 | ((int64_t)(a & 0x7fffffff) << 21));
3194
3195 f64 = recip_estimate (f64, env);
3196
3197 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3198 }
3199
/* NEON URSQRTE: unsigned integer reciprocal square-root estimate.
 * Operands below 2^30 saturate to all-ones.
 */
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    /* Build a double in [0.25, 1.0), choosing the exponent so that the
     * parity of the operand's magnitude is preserved. */
    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    /* Extract 31 fraction bits of the estimate below a fixed top bit. */
    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
3220
/* VFPv4 fused multiply-accumulate: a * b + c via softfloat's fused
 * muladd (flags argument 0 = default behaviour). */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
3233
3234 void HELPER(set_teecr)(CPUARMState *env, uint32_t val)
3235 {
3236 val &= 1;
3237 if (env->teecr != val) {
3238 env->teecr = val;
3239 tb_flush(env);
3240 }
3241 }