1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4
5#include "cpu.h"
6#include "exec-all.h"
9ee6e8bb 7#include "gdbstub.h"
b26eefb6 8#include "helpers.h"
9ee6e8bb
PB
9
10static uint32_t cortexa8_cp15_c0_c1[8] =
11{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
12
13static uint32_t cortexa8_cp15_c0_c2[8] =
14{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
15
16static uint32_t mpcore_cp15_c0_c1[8] =
17{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
18
19static uint32_t mpcore_cp15_c0_c2[8] =
20{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
21
22static uint32_t arm1136_cp15_c0_c1[8] =
23{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
24
25static uint32_t arm1136_cp15_c0_c2[8] =
26{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
b5ff1b31 27
aaed909a
FB
28static uint32_t cpu_arm_find_by_name(const char *name);
29
f3d6b95e
PB
30static inline void set_feature(CPUARMState *env, int feature)
31{
32 env->features |= 1u << feature;
33}
34
35static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
36{
37 env->cp15.c0_cpuid = id;
38 switch (id) {
39 case ARM_CPUID_ARM926:
40 set_feature(env, ARM_FEATURE_VFP);
41 env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
c1713132 42 env->cp15.c0_cachetype = 0x1dd20d2;
610c3c8a 43 env->cp15.c1_sys = 0x00090078;
f3d6b95e 44 break;
ce819861
PB
45 case ARM_CPUID_ARM946:
46 set_feature(env, ARM_FEATURE_MPU);
47 env->cp15.c0_cachetype = 0x0f004006;
610c3c8a 48 env->cp15.c1_sys = 0x00000078;
ce819861 49 break;
f3d6b95e
PB
50 case ARM_CPUID_ARM1026:
51 set_feature(env, ARM_FEATURE_VFP);
52 set_feature(env, ARM_FEATURE_AUXCR);
53 env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
c1713132 54 env->cp15.c0_cachetype = 0x1dd20d2;
610c3c8a 55 env->cp15.c1_sys = 0x00090078;
c1713132 56 break;
9ee6e8bb
PB
57 case ARM_CPUID_ARM1136:
58 set_feature(env, ARM_FEATURE_V6);
59 set_feature(env, ARM_FEATURE_VFP);
60 set_feature(env, ARM_FEATURE_AUXCR);
61 env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
62 env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
63 env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
64 memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
 65        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
66 env->cp15.c0_cachetype = 0x1dd20d2;
67 break;
68 case ARM_CPUID_ARM11MPCORE:
69 set_feature(env, ARM_FEATURE_V6);
70 set_feature(env, ARM_FEATURE_V6K);
71 set_feature(env, ARM_FEATURE_VFP);
72 set_feature(env, ARM_FEATURE_AUXCR);
73 env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
74 env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
75 env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
76 memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
 77        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
78 env->cp15.c0_cachetype = 0x1dd20d2;
79 break;
80 case ARM_CPUID_CORTEXA8:
81 set_feature(env, ARM_FEATURE_V6);
82 set_feature(env, ARM_FEATURE_V6K);
83 set_feature(env, ARM_FEATURE_V7);
84 set_feature(env, ARM_FEATURE_AUXCR);
85 set_feature(env, ARM_FEATURE_THUMB2);
86 set_feature(env, ARM_FEATURE_VFP);
87 set_feature(env, ARM_FEATURE_VFP3);
88 set_feature(env, ARM_FEATURE_NEON);
89 env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
90 env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
91 env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
92 memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
 93        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
94 env->cp15.c0_cachetype = 0x1dd20d2;
95 break;
96 case ARM_CPUID_CORTEXM3:
97 set_feature(env, ARM_FEATURE_V6);
98 set_feature(env, ARM_FEATURE_THUMB2);
99 set_feature(env, ARM_FEATURE_V7);
100 set_feature(env, ARM_FEATURE_M);
101 set_feature(env, ARM_FEATURE_DIV);
102 break;
103 case ARM_CPUID_ANY: /* For userspace emulation. */
104 set_feature(env, ARM_FEATURE_V6);
105 set_feature(env, ARM_FEATURE_V6K);
106 set_feature(env, ARM_FEATURE_V7);
107 set_feature(env, ARM_FEATURE_THUMB2);
108 set_feature(env, ARM_FEATURE_VFP);
109 set_feature(env, ARM_FEATURE_VFP3);
110 set_feature(env, ARM_FEATURE_NEON);
111 set_feature(env, ARM_FEATURE_DIV);
112 break;
c3d2689d
AZ
113 case ARM_CPUID_TI915T:
114 case ARM_CPUID_TI925T:
115 set_feature(env, ARM_FEATURE_OMAPCP);
116 env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring. */
117 env->cp15.c0_cachetype = 0x5109149;
118 env->cp15.c1_sys = 0x00000070;
119 env->cp15.c15_i_max = 0x000;
120 env->cp15.c15_i_min = 0xff0;
121 break;
c1713132
AZ
122 case ARM_CPUID_PXA250:
123 case ARM_CPUID_PXA255:
124 case ARM_CPUID_PXA260:
125 case ARM_CPUID_PXA261:
126 case ARM_CPUID_PXA262:
127 set_feature(env, ARM_FEATURE_XSCALE);
128 /* JTAG_ID is ((id << 28) | 0x09265013) */
129 env->cp15.c0_cachetype = 0xd172172;
610c3c8a 130 env->cp15.c1_sys = 0x00000078;
c1713132
AZ
131 break;
132 case ARM_CPUID_PXA270_A0:
133 case ARM_CPUID_PXA270_A1:
134 case ARM_CPUID_PXA270_B0:
135 case ARM_CPUID_PXA270_B1:
136 case ARM_CPUID_PXA270_C0:
137 case ARM_CPUID_PXA270_C5:
138 set_feature(env, ARM_FEATURE_XSCALE);
139 /* JTAG_ID is ((id << 28) | 0x09265013) */
18c9b560
AZ
140 set_feature(env, ARM_FEATURE_IWMMXT);
141 env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
c1713132 142 env->cp15.c0_cachetype = 0xd172172;
610c3c8a 143 env->cp15.c1_sys = 0x00000078;
f3d6b95e
PB
144 break;
145 default:
146 cpu_abort(env, "Bad CPU ID: %x\n", id);
147 break;
148 }
149}
150
40f137e1
PB
151void cpu_reset(CPUARMState *env)
152{
f3d6b95e
PB
153 uint32_t id;
154 id = env->cp15.c0_cpuid;
155 memset(env, 0, offsetof(CPUARMState, breakpoints));
156 if (id)
157 cpu_reset_model_id(env, id);
40f137e1
PB
158#if defined (CONFIG_USER_ONLY)
159 env->uncached_cpsr = ARM_CPU_MODE_USR;
160 env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
161#else
162 /* SVC mode with interrupts disabled. */
163 env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
9ee6e8bb
PB
164 /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
165 clear at reset. */
166 if (IS_M(env))
167 env->uncached_cpsr &= ~CPSR_I;
40f137e1
PB
168 env->vfp.xregs[ARM_VFP_FPEXC] = 0;
169#endif
170 env->regs[15] = 0;
f3d6b95e 171 tlb_flush(env, 1);
40f137e1
PB
172}
173
aaed909a 174CPUARMState *cpu_arm_init(const char *cpu_model)
40f137e1
PB
175{
176 CPUARMState *env;
aaed909a 177 uint32_t id;
b26eefb6 178 static int inited = 0;
40f137e1 179
aaed909a
FB
180 id = cpu_arm_find_by_name(cpu_model);
181 if (id == 0)
182 return NULL;
40f137e1
PB
183 env = qemu_mallocz(sizeof(CPUARMState));
184 if (!env)
185 return NULL;
186 cpu_exec_init(env);
b26eefb6
PB
187 if (!inited) {
188 inited = 1;
189 arm_translate_init();
190 }
191
01ba9816 192 env->cpu_model_str = cpu_model;
aaed909a 193 env->cp15.c0_cpuid = id;
40f137e1 194 cpu_reset(env);
40f137e1
PB
195 return env;
196}
197
3371d272
PB
198struct arm_cpu_t {
199 uint32_t id;
200 const char *name;
201};
202
203static const struct arm_cpu_t arm_cpu_names[] = {
204 { ARM_CPUID_ARM926, "arm926"},
ce819861 205 { ARM_CPUID_ARM946, "arm946"},
3371d272 206 { ARM_CPUID_ARM1026, "arm1026"},
9ee6e8bb
PB
207 { ARM_CPUID_ARM1136, "arm1136"},
208 { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
209 { ARM_CPUID_CORTEXM3, "cortex-m3"},
210 { ARM_CPUID_CORTEXA8, "cortex-a8"},
c3d2689d 211 { ARM_CPUID_TI925T, "ti925t" },
c1713132
AZ
212 { ARM_CPUID_PXA250, "pxa250" },
213 { ARM_CPUID_PXA255, "pxa255" },
214 { ARM_CPUID_PXA260, "pxa260" },
215 { ARM_CPUID_PXA261, "pxa261" },
216 { ARM_CPUID_PXA262, "pxa262" },
217 { ARM_CPUID_PXA270, "pxa270" },
218 { ARM_CPUID_PXA270_A0, "pxa270-a0" },
219 { ARM_CPUID_PXA270_A1, "pxa270-a1" },
220 { ARM_CPUID_PXA270_B0, "pxa270-b0" },
221 { ARM_CPUID_PXA270_B1, "pxa270-b1" },
222 { ARM_CPUID_PXA270_C0, "pxa270-c0" },
223 { ARM_CPUID_PXA270_C5, "pxa270-c5" },
9ee6e8bb 224 { ARM_CPUID_ANY, "any"},
3371d272
PB
225 { 0, NULL}
226};
227
c732abe2 228void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
5adb4839
PB
229{
230 int i;
231
c732abe2 232 (*cpu_fprintf)(f, "Available CPUs:\n");
5adb4839 233 for (i = 0; arm_cpu_names[i].name; i++) {
c732abe2 234 (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name);
5adb4839
PB
235 }
236}
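/* Usage note (added for illustration): since fprintf already matches the
   cpu_fprintf signature, "arm_cpu_list(stdout, fprintf);" prints the table
   straight to stdout. */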
237
aaed909a
FB
238/* return 0 if not found */
239static uint32_t cpu_arm_find_by_name(const char *name)
40f137e1 240{
3371d272
PB
241 int i;
242 uint32_t id;
243
244 id = 0;
3371d272
PB
245 for (i = 0; arm_cpu_names[i].name; i++) {
246 if (strcmp(name, arm_cpu_names[i].name) == 0) {
247 id = arm_cpu_names[i].id;
248 break;
249 }
250 }
aaed909a 251 return id;
40f137e1
PB
252}
253
254void cpu_arm_close(CPUARMState *env)
255{
256 free(env);
257}
258
9ee6e8bb
PB
 259/* Polynomial multiplication is like integer multiplication except the
260 partial products are XORed, not added. */
261uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2)
262{
263 uint32_t mask;
264 uint32_t result;
265 result = 0;
266 while (op1) {
267 mask = 0;
268 if (op1 & 1)
269 mask |= 0xff;
270 if (op1 & (1 << 8))
271 mask |= (0xff << 8);
272 if (op1 & (1 << 16))
273 mask |= (0xff << 16);
274 if (op1 & (1 << 24))
275 mask |= (0xff << 24);
276 result ^= op2 & mask;
277 op1 = (op1 >> 1) & 0x7f7f7f7f;
278 op2 = (op2 << 1) & 0xfefefefe;
279 }
280 return result;
281}
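
/* Illustrative sketch (added here, not part of the original helper set): a
   plain single-lane carry-less multiply that helper_neon_mul_p8 should agree
   with on every byte lane, e.g. both give 3 * 7 = 0x09 because
   (x + 1)(x^2 + x + 1) = x^3 + 1 over GF(2). */
static uint8_t ref_pmul8(uint8_t a, uint8_t b)
{
    uint8_t r = 0;
    while (a) {
        if (a & 1)
            r ^= b;          /* XOR in a shifted copy instead of adding */
        a >>= 1;
        b <<= 1;             /* bits shifted above bit 7 are discarded */
    }
    return r;
}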
282
2f4a40e5
AZ
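/* Summary of the CPSR layout assembled/dissected by cpsr_read/cpsr_write
   below (added comment): N=bit 31, Z=bit 30, C=bit 29, V=bit 28, Q=bit 27,
   IT[1:0]=bits 26:25, GE[3:0]=bits 19:16, IT[7:2]=bits 15:10, T=bit 5;
   everything else is kept in uncached_cpsr. */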
283uint32_t cpsr_read(CPUARMState *env)
284{
285 int ZF;
286 ZF = (env->NZF == 0);
287 return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
288 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
289 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
290 | ((env->condexec_bits & 0xfc) << 8)
291 | (env->GE << 16);
292}
293
294void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
295{
296 /* NOTE: N = 1 and Z = 1 cannot be stored currently */
297 if (mask & CPSR_NZCV) {
298 env->NZF = (val & 0xc0000000) ^ 0x40000000;
299 env->CF = (val >> 29) & 1;
300 env->VF = (val << 3) & 0x80000000;
301 }
302 if (mask & CPSR_Q)
303 env->QF = ((val & CPSR_Q) != 0);
304 if (mask & CPSR_T)
305 env->thumb = ((val & CPSR_T) != 0);
306 if (mask & CPSR_IT_0_1) {
307 env->condexec_bits &= ~3;
308 env->condexec_bits |= (val >> 25) & 3;
309 }
310 if (mask & CPSR_IT_2_7) {
311 env->condexec_bits &= 3;
312 env->condexec_bits |= (val >> 8) & 0xfc;
313 }
314 if (mask & CPSR_GE) {
315 env->GE = (val >> 16) & 0xf;
316 }
317
318 if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
319 switch_mode(env, val & CPSR_M);
320 }
321 mask &= ~CACHED_CPSR_BITS;
322 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
323}
324
b26eefb6
PB
325#define HELPER(x) helper_##x
326/* Sign/zero extend */
327uint32_t HELPER(sxtb16)(uint32_t x)
328{
329 uint32_t res;
330 res = (uint16_t)(int8_t)x;
331 res |= (uint32_t)(int8_t)(x >> 16) << 16;
332 return res;
333}
334
335uint32_t HELPER(uxtb16)(uint32_t x)
336{
337 uint32_t res;
338 res = (uint16_t)(uint8_t)x;
339 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
340 return res;
341}
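
/* Added worked example: sxtb16(0x00800080) == 0xff80ff80 (bytes 0 and 2
   sign-extended into halfwords), while uxtb16(0xff80ff80) == 0x00800080
   (the same bytes zero-extended). */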
342
5fafdf24 343#if defined(CONFIG_USER_ONLY)
b5ff1b31
FB
344
345void do_interrupt (CPUState *env)
346{
347 env->exception_index = -1;
348}
349
9ee6e8bb
PB
350/* Structure used to record exclusive memory locations. */
351typedef struct mmon_state {
352 struct mmon_state *next;
353 CPUARMState *cpu_env;
354 uint32_t addr;
355} mmon_state;
356
357/* Chain of current locks. */
358static mmon_state* mmon_head = NULL;
359
b5ff1b31 360int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
6ebbf390 361 int mmu_idx, int is_softmmu)
b5ff1b31
FB
362{
363 if (rw == 2) {
364 env->exception_index = EXCP_PREFETCH_ABORT;
365 env->cp15.c6_insn = address;
366 } else {
367 env->exception_index = EXCP_DATA_ABORT;
368 env->cp15.c6_data = address;
369 }
370 return 1;
371}
372
9ee6e8bb
PB
373static void allocate_mmon_state(CPUState *env)
374{
375 env->mmon_entry = malloc(sizeof (mmon_state));
376 if (!env->mmon_entry)
377 abort();
378 memset (env->mmon_entry, 0, sizeof (mmon_state));
379 env->mmon_entry->cpu_env = env;
380 mmon_head = env->mmon_entry;
381}
382
383/* Flush any monitor locks for the specified address. */
384static void flush_mmon(uint32_t addr)
385{
386 mmon_state *mon;
387
388 for (mon = mmon_head; mon; mon = mon->next)
389 {
390 if (mon->addr != addr)
391 continue;
392
393 mon->addr = 0;
394 break;
395 }
396}
397
398/* Mark an address for exclusive access. */
399void helper_mark_exclusive(CPUState *env, uint32_t addr)
400{
401 if (!env->mmon_entry)
402 allocate_mmon_state(env);
403 /* Clear any previous locks. */
404 flush_mmon(addr);
405 env->mmon_entry->addr = addr;
406}
407
408/* Test if an exclusive address is still exclusive. Returns zero
409 if the address is still exclusive. */
410int helper_test_exclusive(CPUState *env, uint32_t addr)
411{
412 int res;
413
414 if (!env->mmon_entry)
415 return 1;
416 if (env->mmon_entry->addr == addr)
417 res = 0;
418 else
419 res = 1;
420 flush_mmon(addr);
421 return res;
422}
423
424void helper_clrex(CPUState *env)
425{
426 if (!(env->mmon_entry && env->mmon_entry->addr))
427 return;
428 flush_mmon(env->mmon_entry->addr);
429}
430
9b3c35e0 431target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
b5ff1b31
FB
432{
433 return addr;
434}
435
436/* These should probably raise undefined insn exceptions. */
c1713132
AZ
437void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
438{
439 int op1 = (insn >> 8) & 0xf;
440 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
441 return;
442}
443
444uint32_t helper_get_cp(CPUState *env, uint32_t insn)
445{
446 int op1 = (insn >> 8) & 0xf;
447 cpu_abort(env, "cp%i insn %08x\n", op1, insn);
448 return 0;
449}
450
b5ff1b31
FB
451void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
452{
453 cpu_abort(env, "cp15 insn %08x\n", insn);
454}
455
456uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
457{
458 cpu_abort(env, "cp15 insn %08x\n", insn);
459 return 0;
460}
461
9ee6e8bb
PB
462/* These should probably raise undefined insn exceptions. */
463void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
464{
465 cpu_abort(env, "v7m_mrs %d\n", reg);
466}
467
468uint32_t helper_v7m_mrs(CPUState *env, int reg)
469{
470 cpu_abort(env, "v7m_mrs %d\n", reg);
471 return 0;
472}
473
b5ff1b31
FB
474void switch_mode(CPUState *env, int mode)
475{
476 if (mode != ARM_CPU_MODE_USR)
477 cpu_abort(env, "Tried to switch out of user mode\n");
478}
479
9ee6e8bb
PB
480void helper_set_r13_banked(CPUState *env, int mode, uint32_t val)
481{
482 cpu_abort(env, "banked r13 write\n");
483}
484
485uint32_t helper_get_r13_banked(CPUState *env, int mode)
486{
487 cpu_abort(env, "banked r13 read\n");
488 return 0;
489}
490
b5ff1b31
FB
491#else
492
8e71621f
PB
493extern int semihosting_enabled;
494
b5ff1b31
FB
495/* Map CPU modes onto saved register banks. */
496static inline int bank_number (int mode)
497{
498 switch (mode) {
499 case ARM_CPU_MODE_USR:
500 case ARM_CPU_MODE_SYS:
501 return 0;
502 case ARM_CPU_MODE_SVC:
503 return 1;
504 case ARM_CPU_MODE_ABT:
505 return 2;
506 case ARM_CPU_MODE_UND:
507 return 3;
508 case ARM_CPU_MODE_IRQ:
509 return 4;
510 case ARM_CPU_MODE_FIQ:
511 return 5;
512 }
513 cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
514 return -1;
515}
516
517void switch_mode(CPUState *env, int mode)
518{
519 int old_mode;
520 int i;
521
522 old_mode = env->uncached_cpsr & CPSR_M;
523 if (mode == old_mode)
524 return;
525
526 if (old_mode == ARM_CPU_MODE_FIQ) {
527 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 528 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
529 } else if (mode == ARM_CPU_MODE_FIQ) {
530 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8637c67f 531 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
b5ff1b31
FB
532 }
533
534 i = bank_number(old_mode);
535 env->banked_r13[i] = env->regs[13];
536 env->banked_r14[i] = env->regs[14];
537 env->banked_spsr[i] = env->spsr;
538
539 i = bank_number(mode);
540 env->regs[13] = env->banked_r13[i];
541 env->regs[14] = env->banked_r14[i];
542 env->spsr = env->banked_spsr[i];
543}
544
9ee6e8bb
PB
545static void v7m_push(CPUARMState *env, uint32_t val)
546{
547 env->regs[13] -= 4;
548 stl_phys(env->regs[13], val);
549}
550
551static uint32_t v7m_pop(CPUARMState *env)
552{
553 uint32_t val;
554 val = ldl_phys(env->regs[13]);
555 env->regs[13] += 4;
556 return val;
557}
558
559/* Switch to V7M main or process stack pointer. */
560static void switch_v7m_sp(CPUARMState *env, int process)
561{
562 uint32_t tmp;
563 if (env->v7m.current_sp != process) {
564 tmp = env->v7m.other_sp;
565 env->v7m.other_sp = env->regs[13];
566 env->regs[13] = tmp;
567 env->v7m.current_sp = process;
568 }
569}
570
571static void do_v7m_exception_exit(CPUARMState *env)
572{
573 uint32_t type;
574 uint32_t xpsr;
575
576 type = env->regs[15];
577 if (env->v7m.exception != 0)
578 armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);
579
580 /* Switch to the target stack. */
581 switch_v7m_sp(env, (type & 4) != 0);
582 /* Pop registers. */
583 env->regs[0] = v7m_pop(env);
584 env->regs[1] = v7m_pop(env);
585 env->regs[2] = v7m_pop(env);
586 env->regs[3] = v7m_pop(env);
587 env->regs[12] = v7m_pop(env);
588 env->regs[14] = v7m_pop(env);
589 env->regs[15] = v7m_pop(env);
590 xpsr = v7m_pop(env);
591 xpsr_write(env, xpsr, 0xfffffdff);
592 /* Undo stack alignment. */
593 if (xpsr & 0x200)
594 env->regs[13] |= 4;
595 /* ??? The exception return type specifies Thread/Handler mode. However
596 this is also implied by the xPSR value. Not sure what to do
597 if there is a mismatch. */
598 /* ??? Likewise for mismatches between the CONTROL register and the stack
599 pointer. */
600}
601
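/* Added note on the stack frame shared by the two routines around here:
   do_interrupt_v7m below pushes xPSR, PC, LR, R12, R3, R2, R1, R0 onto the
   active stack (so R0 ends up at the lowest address), and
   do_v7m_exception_exit above pops them back in the opposite order. */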
602void do_interrupt_v7m(CPUARMState *env)
603{
604 uint32_t xpsr = xpsr_read(env);
605 uint32_t lr;
606 uint32_t addr;
607
608 lr = 0xfffffff1;
609 if (env->v7m.current_sp)
610 lr |= 4;
611 if (env->v7m.exception == 0)
612 lr |= 8;
613
614 /* For exceptions we just mark as pending on the NVIC, and let that
615 handle it. */
616 /* TODO: Need to escalate if the current priority is higher than the
617 one we're raising. */
618 switch (env->exception_index) {
619 case EXCP_UDEF:
620 armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
621 return;
622 case EXCP_SWI:
623 env->regs[15] += 2;
624 armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
625 return;
626 case EXCP_PREFETCH_ABORT:
627 case EXCP_DATA_ABORT:
628 armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
629 return;
630 case EXCP_BKPT:
2ad207d4
PB
631 if (semihosting_enabled) {
632 int nr;
633 nr = lduw_code(env->regs[15]) & 0xff;
634 if (nr == 0xab) {
635 env->regs[15] += 2;
636 env->regs[0] = do_arm_semihosting(env);
637 return;
638 }
639 }
9ee6e8bb
PB
640 armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
641 return;
642 case EXCP_IRQ:
643 env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
644 break;
645 case EXCP_EXCEPTION_EXIT:
646 do_v7m_exception_exit(env);
647 return;
648 default:
649 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
650 return; /* Never happens. Keep compiler happy. */
651 }
652
653 /* Align stack pointer. */
654 /* ??? Should only do this if Configuration Control Register
655 STACKALIGN bit is set. */
656 if (env->regs[13] & 4) {
657 env->regs[13] += 4;
658 xpsr |= 0x200;
659 }
 660    /* Switch to the handler mode.  */
661 v7m_push(env, xpsr);
662 v7m_push(env, env->regs[15]);
663 v7m_push(env, env->regs[14]);
664 v7m_push(env, env->regs[12]);
665 v7m_push(env, env->regs[3]);
666 v7m_push(env, env->regs[2]);
667 v7m_push(env, env->regs[1]);
668 v7m_push(env, env->regs[0]);
669 switch_v7m_sp(env, 0);
670 env->uncached_cpsr &= ~CPSR_IT;
671 env->regs[14] = lr;
672 addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
673 env->regs[15] = addr & 0xfffffffe;
674 env->thumb = addr & 1;
675}
676
b5ff1b31
FB
677/* Handle a CPU exception. */
678void do_interrupt(CPUARMState *env)
679{
680 uint32_t addr;
681 uint32_t mask;
682 int new_mode;
683 uint32_t offset;
684
9ee6e8bb
PB
685 if (IS_M(env)) {
686 do_interrupt_v7m(env);
687 return;
688 }
b5ff1b31
FB
689 /* TODO: Vectored interrupt controller. */
690 switch (env->exception_index) {
691 case EXCP_UDEF:
692 new_mode = ARM_CPU_MODE_UND;
693 addr = 0x04;
694 mask = CPSR_I;
695 if (env->thumb)
696 offset = 2;
697 else
698 offset = 4;
699 break;
700 case EXCP_SWI:
8e71621f
PB
701 if (semihosting_enabled) {
702 /* Check for semihosting interrupt. */
703 if (env->thumb) {
704 mask = lduw_code(env->regs[15] - 2) & 0xff;
705 } else {
706 mask = ldl_code(env->regs[15] - 4) & 0xffffff;
707 }
708 /* Only intercept calls from privileged modes, to provide some
709 semblance of security. */
710 if (((mask == 0x123456 && !env->thumb)
711 || (mask == 0xab && env->thumb))
712 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
713 env->regs[0] = do_arm_semihosting(env);
714 return;
715 }
716 }
b5ff1b31
FB
717 new_mode = ARM_CPU_MODE_SVC;
718 addr = 0x08;
719 mask = CPSR_I;
 720        /* The PC already points to the next instruction.  */
721 offset = 0;
722 break;
06c949e6 723 case EXCP_BKPT:
9ee6e8bb 724 /* See if this is a semihosting syscall. */
2ad207d4 725 if (env->thumb && semihosting_enabled) {
9ee6e8bb
PB
726 mask = lduw_code(env->regs[15]) & 0xff;
727 if (mask == 0xab
728 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
729 env->regs[15] += 2;
730 env->regs[0] = do_arm_semihosting(env);
731 return;
732 }
733 }
734 /* Fall through to prefetch abort. */
735 case EXCP_PREFETCH_ABORT:
b5ff1b31
FB
736 new_mode = ARM_CPU_MODE_ABT;
737 addr = 0x0c;
738 mask = CPSR_A | CPSR_I;
739 offset = 4;
740 break;
741 case EXCP_DATA_ABORT:
742 new_mode = ARM_CPU_MODE_ABT;
743 addr = 0x10;
744 mask = CPSR_A | CPSR_I;
745 offset = 8;
746 break;
747 case EXCP_IRQ:
748 new_mode = ARM_CPU_MODE_IRQ;
749 addr = 0x18;
750 /* Disable IRQ and imprecise data aborts. */
751 mask = CPSR_A | CPSR_I;
752 offset = 4;
753 break;
754 case EXCP_FIQ:
755 new_mode = ARM_CPU_MODE_FIQ;
756 addr = 0x1c;
757 /* Disable FIQ, IRQ and imprecise data aborts. */
758 mask = CPSR_A | CPSR_I | CPSR_F;
759 offset = 4;
760 break;
761 default:
762 cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
763 return; /* Never happens. Keep compiler happy. */
764 }
765 /* High vectors. */
766 if (env->cp15.c1_sys & (1 << 13)) {
767 addr += 0xffff0000;
768 }
769 switch_mode (env, new_mode);
770 env->spsr = cpsr_read(env);
9ee6e8bb
PB
771 /* Clear IT bits. */
772 env->condexec_bits = 0;
6d7e6326 773 /* Switch to the new mode, and switch to Arm mode. */
b5ff1b31 774 /* ??? Thumb interrupt handlers not implemented. */
6d7e6326 775 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
b5ff1b31 776 env->uncached_cpsr |= mask;
6d7e6326 777 env->thumb = 0;
b5ff1b31
FB
778 env->regs[14] = env->regs[15] + offset;
779 env->regs[15] = addr;
780 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
781}
782
783/* Check section/page access permissions.
784 Returns the page protection flags, or zero if the access is not
785 permitted. */
786static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
787 int is_user)
788{
9ee6e8bb
PB
789 int prot_ro;
790
b5ff1b31
FB
791 if (domain == 3)
792 return PAGE_READ | PAGE_WRITE;
793
9ee6e8bb
PB
794 if (access_type == 1)
795 prot_ro = 0;
796 else
797 prot_ro = PAGE_READ;
798
b5ff1b31
FB
799 switch (ap) {
800 case 0:
78600320 801 if (access_type == 1)
b5ff1b31
FB
802 return 0;
803 switch ((env->cp15.c1_sys >> 8) & 3) {
804 case 1:
805 return is_user ? 0 : PAGE_READ;
806 case 2:
807 return PAGE_READ;
808 default:
809 return 0;
810 }
811 case 1:
812 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
813 case 2:
814 if (is_user)
9ee6e8bb 815 return prot_ro;
b5ff1b31
FB
816 else
817 return PAGE_READ | PAGE_WRITE;
818 case 3:
819 return PAGE_READ | PAGE_WRITE;
9ee6e8bb
PB
820 case 4: case 7: /* Reserved. */
821 return 0;
822 case 5:
823 return is_user ? 0 : prot_ro;
824 case 6:
825 return prot_ro;
b5ff1b31
FB
826 default:
827 abort();
828 }
829}
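
/* Added note: the ap values decoded above follow the ARM AP/APX encoding --
   0..3 are the plain v5 AP bits, while 4..7 only occur when
   get_phys_addr_v6 folds the APX bit in as bit 2 of ap. */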
830
9ee6e8bb
PB
831static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
832 int is_user, uint32_t *phys_ptr, int *prot)
b5ff1b31
FB
833{
834 int code;
835 uint32_t table;
836 uint32_t desc;
837 int type;
838 int ap;
839 int domain;
840 uint32_t phys_addr;
841
9ee6e8bb
PB
842 /* Pagetable walk. */
843 /* Lookup l1 descriptor. */
844 if (address & env->cp15.c2_mask)
845 table = env->cp15.c2_base1;
846 else
847 table = env->cp15.c2_base0;
848 table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
849 desc = ldl_phys(table);
850 type = (desc & 3);
851 domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
852 if (type == 0) {
 853        /* Section translation fault.  */
854 code = 5;
855 goto do_fault;
856 }
857 if (domain == 0 || domain == 2) {
858 if (type == 2)
859 code = 9; /* Section domain fault. */
860 else
861 code = 11; /* Page domain fault. */
862 goto do_fault;
863 }
864 if (type == 2) {
865 /* 1Mb section. */
866 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
867 ap = (desc >> 10) & 3;
868 code = 13;
869 } else {
870 /* Lookup l2 entry. */
871 if (type == 1) {
872 /* Coarse pagetable. */
873 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
874 } else {
875 /* Fine pagetable. */
876 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
877 }
878 desc = ldl_phys(table);
879 switch (desc & 3) {
880 case 0: /* Page translation fault. */
881 code = 7;
882 goto do_fault;
883 case 1: /* 64k page. */
884 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
885 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
ce819861 886 break;
9ee6e8bb
PB
887 case 2: /* 4k page. */
888 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
889 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
ce819861 890 break;
9ee6e8bb
PB
891 case 3: /* 1k page. */
892 if (type == 1) {
893 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
894 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
895 } else {
896 /* Page translation fault. */
897 code = 7;
898 goto do_fault;
899 }
900 } else {
901 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
902 }
903 ap = (desc >> 4) & 3;
ce819861
PB
904 break;
905 default:
9ee6e8bb
PB
906 /* Never happens, but compiler isn't smart enough to tell. */
907 abort();
ce819861 908 }
9ee6e8bb
PB
909 code = 15;
910 }
911 *prot = check_ap(env, ap, domain, access_type, is_user);
912 if (!*prot) {
913 /* Access permission fault. */
914 goto do_fault;
915 }
916 *phys_ptr = phys_addr;
917 return 0;
918do_fault:
919 return code | (domain << 4);
920}
921
922static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
923 int is_user, uint32_t *phys_ptr, int *prot)
924{
925 int code;
926 uint32_t table;
927 uint32_t desc;
928 uint32_t xn;
929 int type;
930 int ap;
931 int domain;
932 uint32_t phys_addr;
933
934 /* Pagetable walk. */
935 /* Lookup l1 descriptor. */
936 if (address & env->cp15.c2_mask)
937 table = env->cp15.c2_base1;
938 else
939 table = env->cp15.c2_base0;
940 table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
941 desc = ldl_phys(table);
942 type = (desc & 3);
943 if (type == 0) {
 944        /* Section translation fault.  */
945 code = 5;
946 domain = 0;
947 goto do_fault;
948 } else if (type == 2 && (desc & (1 << 18))) {
949 /* Supersection. */
950 domain = 0;
b5ff1b31 951 } else {
9ee6e8bb
PB
952 /* Section or page. */
953 domain = (desc >> 4) & 0x1e;
954 }
955 domain = (env->cp15.c3 >> domain) & 3;
956 if (domain == 0 || domain == 2) {
957 if (type == 2)
958 code = 9; /* Section domain fault. */
959 else
960 code = 11; /* Page domain fault. */
961 goto do_fault;
962 }
963 if (type == 2) {
964 if (desc & (1 << 18)) {
965 /* Supersection. */
966 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
b5ff1b31 967 } else {
9ee6e8bb
PB
968 /* Section. */
969 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
b5ff1b31 970 }
9ee6e8bb
PB
971 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
972 xn = desc & (1 << 4);
973 code = 13;
974 } else {
975 /* Lookup l2 entry. */
976 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
977 desc = ldl_phys(table);
978 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
979 switch (desc & 3) {
980 case 0: /* Page translation fault. */
981 code = 7;
b5ff1b31 982 goto do_fault;
9ee6e8bb
PB
983 case 1: /* 64k page. */
984 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
985 xn = desc & (1 << 15);
986 break;
987 case 2: case 3: /* 4k page. */
988 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
989 xn = desc & 1;
990 break;
991 default:
992 /* Never happens, but compiler isn't smart enough to tell. */
993 abort();
b5ff1b31 994 }
9ee6e8bb
PB
995 code = 15;
996 }
997 if (xn && access_type == 2)
998 goto do_fault;
999
1000 *prot = check_ap(env, ap, domain, access_type, is_user);
1001 if (!*prot) {
1002 /* Access permission fault. */
1003 goto do_fault;
b5ff1b31 1004 }
9ee6e8bb 1005 *phys_ptr = phys_addr;
b5ff1b31
FB
1006 return 0;
1007do_fault:
1008 return code | (domain << 4);
1009}
1010
9ee6e8bb
PB
1011static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
1012 int is_user, uint32_t *phys_ptr, int *prot)
1013{
1014 int n;
1015 uint32_t mask;
1016 uint32_t base;
1017
1018 *phys_ptr = address;
1019 for (n = 7; n >= 0; n--) {
1020 base = env->cp15.c6_region[n];
1021 if ((base & 1) == 0)
1022 continue;
1023 mask = 1 << ((base >> 1) & 0x1f);
1024 /* Keep this shift separate from the above to avoid an
1025 (undefined) << 32. */
1026 mask = (mask << 1) - 1;
1027 if (((base ^ address) & ~mask) == 0)
1028 break;
1029 }
1030 if (n < 0)
1031 return 2;
1032
1033 if (access_type == 2) {
1034 mask = env->cp15.c5_insn;
1035 } else {
1036 mask = env->cp15.c5_data;
1037 }
1038 mask = (mask >> (n * 4)) & 0xf;
1039 switch (mask) {
1040 case 0:
1041 return 1;
1042 case 1:
1043 if (is_user)
1044 return 1;
1045 *prot = PAGE_READ | PAGE_WRITE;
1046 break;
1047 case 2:
1048 *prot = PAGE_READ;
1049 if (!is_user)
1050 *prot |= PAGE_WRITE;
1051 break;
1052 case 3:
1053 *prot = PAGE_READ | PAGE_WRITE;
1054 break;
1055 case 5:
1056 if (is_user)
1057 return 1;
1058 *prot = PAGE_READ;
1059 break;
1060 case 6:
1061 *prot = PAGE_READ;
1062 break;
1063 default:
1064 /* Bad permission. */
1065 return 1;
1066 }
1067 return 0;
1068}
1069
1070static inline int get_phys_addr(CPUState *env, uint32_t address,
1071 int access_type, int is_user,
1072 uint32_t *phys_ptr, int *prot)
1073{
1074 /* Fast Context Switch Extension. */
1075 if (address < 0x02000000)
1076 address += env->cp15.c13_fcse;
1077
1078 if ((env->cp15.c1_sys & 1) == 0) {
1079 /* MMU/MPU disabled. */
1080 *phys_ptr = address;
1081 *prot = PAGE_READ | PAGE_WRITE;
1082 return 0;
1083 } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1084 return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1085 prot);
1086 } else if (env->cp15.c1_sys & (1 << 23)) {
1087 return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1088 prot);
1089 } else {
1090 return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1091 prot);
1092 }
1093}
1094
b5ff1b31 1095int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
6ebbf390 1096 int access_type, int mmu_idx, int is_softmmu)
b5ff1b31
FB
1097{
1098 uint32_t phys_addr;
1099 int prot;
6ebbf390 1100 int ret, is_user;
b5ff1b31 1101
6ebbf390 1102 is_user = mmu_idx == MMU_USER_IDX;
b5ff1b31
FB
1103 ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
1104 if (ret == 0) {
1105 /* Map a single [sub]page. */
1106 phys_addr &= ~(uint32_t)0x3ff;
1107 address &= ~(uint32_t)0x3ff;
6ebbf390 1108 return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
b5ff1b31
FB
1109 is_softmmu);
1110 }
1111
1112 if (access_type == 2) {
1113 env->cp15.c5_insn = ret;
1114 env->cp15.c6_insn = address;
1115 env->exception_index = EXCP_PREFETCH_ABORT;
1116 } else {
1117 env->cp15.c5_data = ret;
9ee6e8bb
PB
1118 if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1119 env->cp15.c5_data |= (1 << 11);
b5ff1b31
FB
1120 env->cp15.c6_data = address;
1121 env->exception_index = EXCP_DATA_ABORT;
1122 }
1123 return 1;
1124}
1125
9b3c35e0 1126target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
b5ff1b31
FB
1127{
1128 uint32_t phys_addr;
1129 int prot;
1130 int ret;
1131
1132 ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
1133
1134 if (ret != 0)
1135 return -1;
1136
1137 return phys_addr;
1138}
1139
9ee6e8bb
PB
1140/* Not really implemented. Need to figure out a sane way of doing this.
1141 Maybe add generic watchpoint support and use that. */
1142
1143void helper_mark_exclusive(CPUState *env, uint32_t addr)
1144{
1145 env->mmon_addr = addr;
1146}
1147
1148int helper_test_exclusive(CPUState *env, uint32_t addr)
1149{
1150 return (env->mmon_addr != addr);
1151}
1152
1153void helper_clrex(CPUState *env)
1154{
1155 env->mmon_addr = -1;
1156}
1157
c1713132
AZ
1158void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
1159{
1160 int cp_num = (insn >> 8) & 0xf;
1161 int cp_info = (insn >> 5) & 7;
1162 int src = (insn >> 16) & 0xf;
1163 int operand = insn & 0xf;
1164
1165 if (env->cp[cp_num].cp_write)
1166 env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
1167 cp_info, src, operand, val);
1168}
1169
1170uint32_t helper_get_cp(CPUState *env, uint32_t insn)
1171{
1172 int cp_num = (insn >> 8) & 0xf;
1173 int cp_info = (insn >> 5) & 7;
1174 int dest = (insn >> 16) & 0xf;
1175 int operand = insn & 0xf;
1176
1177 if (env->cp[cp_num].cp_read)
1178 return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
1179 cp_info, dest, operand);
1180 return 0;
1181}
1182
ce819861
PB
1183/* Return basic MPU access permission bits. */
1184static uint32_t simple_mpu_ap_bits(uint32_t val)
1185{
1186 uint32_t ret;
1187 uint32_t mask;
1188 int i;
1189 ret = 0;
1190 mask = 3;
1191 for (i = 0; i < 16; i += 2) {
1192 ret |= (val >> i) & mask;
1193 mask <<= 2;
1194 }
1195 return ret;
1196}
1197
1198/* Pad basic MPU access permission bits to extended format. */
1199static uint32_t extended_mpu_ap_bits(uint32_t val)
1200{
1201 uint32_t ret;
1202 uint32_t mask;
1203 int i;
1204 ret = 0;
1205 mask = 3;
1206 for (i = 0; i < 16; i += 2) {
1207 ret |= (val & mask) << i;
1208 mask <<= 2;
1209 }
1210 return ret;
1211}
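
/* Added sanity-check sketch (illustrative only, never called by the
   emulation proper): the two helpers above are inverses for any 16-bit
   value in the simple format, e.g. extended_mpu_ap_bits(0x000f) ==
   0x00000033 and simple_mpu_ap_bits(0x00000033) == 0x000f. */
static void mpu_ap_bits_selfcheck(void)
{
    uint32_t simple = 0x000f;
    if (simple_mpu_ap_bits(extended_mpu_ap_bits(simple)) != simple)
        abort();
}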
1212
b5ff1b31
FB
1213void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
1214{
9ee6e8bb
PB
1215 int op1;
1216 int op2;
1217 int crm;
b5ff1b31 1218
9ee6e8bb 1219 op1 = (insn >> 21) & 7;
b5ff1b31 1220 op2 = (insn >> 5) & 7;
ce819861 1221 crm = insn & 0xf;
b5ff1b31 1222 switch ((insn >> 16) & 0xf) {
9ee6e8bb
PB
1223 case 0:
1224 if (((insn >> 21) & 7) == 2) {
1225 /* ??? Select cache level. Ignore. */
1226 return;
1227 }
1228 /* ID codes. */
610c3c8a
AZ
1229 if (arm_feature(env, ARM_FEATURE_XSCALE))
1230 break;
c3d2689d
AZ
1231 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1232 break;
b5ff1b31
FB
1233 goto bad_reg;
1234 case 1: /* System configuration. */
c3d2689d
AZ
1235 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1236 op2 = 0;
b5ff1b31
FB
1237 switch (op2) {
1238 case 0:
ce819861 1239 if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
c1713132 1240 env->cp15.c1_sys = val;
b5ff1b31
FB
1241 /* ??? Lots of these bits are not implemented. */
1242 /* This may enable/disable the MMU, so do a TLB flush. */
1243 tlb_flush(env, 1);
1244 break;
9ee6e8bb 1245        case 1: /* Auxiliary control register.  */
610c3c8a
AZ
1246 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1247 env->cp15.c1_xscaleauxcr = val;
c1713132 1248 break;
610c3c8a 1249 }
9ee6e8bb
PB
1250 /* Not implemented. */
1251 break;
b5ff1b31 1252 case 2:
610c3c8a
AZ
1253 if (arm_feature(env, ARM_FEATURE_XSCALE))
1254 goto bad_reg;
b5ff1b31
FB
1255 env->cp15.c1_coproc = val;
1256 /* ??? Is this safe when called from within a TB? */
1257 tb_flush(env);
c1713132 1258 break;
b5ff1b31
FB
1259 default:
1260 goto bad_reg;
1261 }
1262 break;
ce819861
PB
1263 case 2: /* MMU Page table control / MPU cache control. */
1264 if (arm_feature(env, ARM_FEATURE_MPU)) {
1265 switch (op2) {
1266 case 0:
1267 env->cp15.c2_data = val;
1268 break;
1269 case 1:
1270 env->cp15.c2_insn = val;
1271 break;
1272 default:
1273 goto bad_reg;
1274 }
1275 } else {
9ee6e8bb
PB
1276 switch (op2) {
1277 case 0:
1278 env->cp15.c2_base0 = val;
1279 break;
1280 case 1:
1281 env->cp15.c2_base1 = val;
1282 break;
1283 case 2:
1284 env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1285 break;
1286 default:
1287 goto bad_reg;
1288 }
ce819861 1289 }
b5ff1b31 1290 break;
ce819861 1291 case 3: /* MMU Domain access control / MPU write buffer control. */
b5ff1b31 1292 env->cp15.c3 = val;
405ee3ad 1293 tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
b5ff1b31
FB
1294 break;
1295 case 4: /* Reserved. */
1296 goto bad_reg;
ce819861 1297 case 5: /* MMU Fault status / MPU access permission. */
c3d2689d
AZ
1298 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1299 op2 = 0;
b5ff1b31
FB
1300 switch (op2) {
1301 case 0:
ce819861
PB
1302 if (arm_feature(env, ARM_FEATURE_MPU))
1303 val = extended_mpu_ap_bits(val);
b5ff1b31
FB
1304 env->cp15.c5_data = val;
1305 break;
1306 case 1:
ce819861
PB
1307 if (arm_feature(env, ARM_FEATURE_MPU))
1308 val = extended_mpu_ap_bits(val);
b5ff1b31
FB
1309 env->cp15.c5_insn = val;
1310 break;
ce819861
PB
1311 case 2:
1312 if (!arm_feature(env, ARM_FEATURE_MPU))
1313 goto bad_reg;
1314 env->cp15.c5_data = val;
b5ff1b31 1315 break;
ce819861
PB
1316 case 3:
1317 if (!arm_feature(env, ARM_FEATURE_MPU))
1318 goto bad_reg;
1319 env->cp15.c5_insn = val;
b5ff1b31
FB
1320 break;
1321 default:
1322 goto bad_reg;
1323 }
1324 break;
ce819861
PB
1325 case 6: /* MMU Fault address / MPU base/size. */
1326 if (arm_feature(env, ARM_FEATURE_MPU)) {
1327 if (crm >= 8)
1328 goto bad_reg;
1329 env->cp15.c6_region[crm] = val;
1330 } else {
c3d2689d
AZ
1331 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1332 op2 = 0;
ce819861
PB
1333 switch (op2) {
1334 case 0:
1335 env->cp15.c6_data = val;
1336 break;
9ee6e8bb
PB
1337 case 1: /* ??? This is WFAR on armv6 */
1338 case 2:
ce819861
PB
1339 env->cp15.c6_insn = val;
1340 break;
1341 default:
1342 goto bad_reg;
1343 }
1344 }
1345 break;
b5ff1b31 1346 case 7: /* Cache control. */
c3d2689d
AZ
1347 env->cp15.c15_i_max = 0x000;
1348 env->cp15.c15_i_min = 0xff0;
b5ff1b31 1349 /* No cache, so nothing to do. */
9ee6e8bb 1350 /* ??? MPCore has VA to PA translation functions. */
b5ff1b31
FB
1351 break;
1352 case 8: /* MMU TLB control. */
1353 switch (op2) {
1354 case 0: /* Invalidate all. */
1355 tlb_flush(env, 0);
1356 break;
1357 case 1: /* Invalidate single TLB entry. */
1358#if 0
1359 /* ??? This is wrong for large pages and sections. */
1360 /* As an ugly hack to make linux work we always flush a 4K
1361 pages. */
1362 val &= 0xfffff000;
1363 tlb_flush_page(env, val);
1364 tlb_flush_page(env, val + 0x400);
1365 tlb_flush_page(env, val + 0x800);
1366 tlb_flush_page(env, val + 0xc00);
1367#else
1368 tlb_flush(env, 1);
1369#endif
1370 break;
9ee6e8bb
PB
1371 case 2: /* Invalidate on ASID. */
1372 tlb_flush(env, val == 0);
1373 break;
1374 case 3: /* Invalidate single entry on MVA. */
1375 /* ??? This is like case 1, but ignores ASID. */
1376 tlb_flush(env, 1);
1377 break;
b5ff1b31
FB
1378 default:
1379 goto bad_reg;
1380 }
1381 break;
ce819861 1382 case 9:
c3d2689d
AZ
1383 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1384 break;
ce819861
PB
1385 switch (crm) {
1386 case 0: /* Cache lockdown. */
9ee6e8bb
PB
1387 switch (op1) {
1388 case 0: /* L1 cache. */
1389 switch (op2) {
1390 case 0:
1391 env->cp15.c9_data = val;
1392 break;
1393 case 1:
1394 env->cp15.c9_insn = val;
1395 break;
1396 default:
1397 goto bad_reg;
1398 }
1399 break;
1400 case 1: /* L2 cache. */
1401 /* Ignore writes to L2 lockdown/auxiliary registers. */
1402 break;
1403 default:
1404 goto bad_reg;
1405 }
1406 break;
ce819861
PB
1407 case 1: /* TCM memory region registers. */
1408 /* Not implemented. */
1409 goto bad_reg;
b5ff1b31
FB
1410 default:
1411 goto bad_reg;
1412 }
1413 break;
1414 case 10: /* MMU TLB lockdown. */
1415 /* ??? TLB lockdown not implemented. */
1416 break;
b5ff1b31
FB
1417 case 12: /* Reserved. */
1418 goto bad_reg;
1419 case 13: /* Process ID. */
1420 switch (op2) {
1421 case 0:
d07edbfa
PB
1422 /* Unlike real hardware the qemu TLB uses virtual addresses,
1423 not modified virtual addresses, so this causes a TLB flush.
1424 */
1425 if (env->cp15.c13_fcse != val)
1426 tlb_flush(env, 1);
1427 env->cp15.c13_fcse = val;
b5ff1b31
FB
1428 break;
1429 case 1:
d07edbfa 1430 /* This changes the ASID, so do a TLB flush. */
ce819861
PB
1431 if (env->cp15.c13_context != val
1432 && !arm_feature(env, ARM_FEATURE_MPU))
d07edbfa
PB
1433 tlb_flush(env, 0);
1434 env->cp15.c13_context = val;
b5ff1b31 1435 break;
9ee6e8bb
PB
1436 case 2:
1437 env->cp15.c13_tls1 = val;
1438 break;
1439 case 3:
1440 env->cp15.c13_tls2 = val;
1441 break;
1442 case 4:
1443 env->cp15.c13_tls3 = val;
1444 break;
b5ff1b31
FB
1445 default:
1446 goto bad_reg;
1447 }
1448 break;
1449 case 14: /* Reserved. */
1450 goto bad_reg;
1451 case 15: /* Implementation specific. */
c1713132 1452 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
ce819861 1453 if (op2 == 0 && crm == 1) {
2e23213f
AZ
1454 if (env->cp15.c15_cpar != (val & 0x3fff)) {
1455 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
1456 tb_flush(env);
1457 env->cp15.c15_cpar = val & 0x3fff;
1458 }
c1713132
AZ
1459 break;
1460 }
1461 goto bad_reg;
1462 }
c3d2689d
AZ
1463 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1464 switch (crm) {
1465 case 0:
1466 break;
1467 case 1: /* Set TI925T configuration. */
1468 env->cp15.c15_ticonfig = val & 0xe7;
1469 env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1470 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1471 break;
1472 case 2: /* Set I_max. */
1473 env->cp15.c15_i_max = val;
1474 break;
1475 case 3: /* Set I_min. */
1476 env->cp15.c15_i_min = val;
1477 break;
1478 case 4: /* Set thread-ID. */
1479 env->cp15.c15_threadid = val & 0xffff;
1480 break;
1481 case 8: /* Wait-for-interrupt (deprecated). */
1482 cpu_interrupt(env, CPU_INTERRUPT_HALT);
1483 break;
1484 default:
1485 goto bad_reg;
1486 }
1487 }
b5ff1b31
FB
1488 break;
1489 }
1490 return;
1491bad_reg:
1492 /* ??? For debugging only. Should raise illegal instruction exception. */
9ee6e8bb
PB
1493 cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1494 (insn >> 16) & 0xf, crm, op1, op2);
b5ff1b31
FB
1495}
1496
1497uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
1498{
9ee6e8bb
PB
1499 int op1;
1500 int op2;
1501 int crm;
b5ff1b31 1502
9ee6e8bb 1503 op1 = (insn >> 21) & 7;
b5ff1b31 1504 op2 = (insn >> 5) & 7;
c3d2689d 1505 crm = insn & 0xf;
b5ff1b31
FB
1506 switch ((insn >> 16) & 0xf) {
1507 case 0: /* ID codes. */
9ee6e8bb
PB
1508 switch (op1) {
1509 case 0:
1510 switch (crm) {
1511 case 0:
1512 switch (op2) {
1513 case 0: /* Device ID. */
1514 return env->cp15.c0_cpuid;
1515 case 1: /* Cache Type. */
1516 return env->cp15.c0_cachetype;
1517 case 2: /* TCM status. */
1518 return 0;
1519 case 3: /* TLB type register. */
1520 return 0; /* No lockable TLB entries. */
1521 case 5: /* CPU ID */
1522 return env->cpu_index;
1523 default:
1524 goto bad_reg;
1525 }
1526 case 1:
1527 if (!arm_feature(env, ARM_FEATURE_V6))
1528 goto bad_reg;
1529 return env->cp15.c0_c1[op2];
1530 case 2:
1531 if (!arm_feature(env, ARM_FEATURE_V6))
1532 goto bad_reg;
1533 return env->cp15.c0_c2[op2];
1534 case 3: case 4: case 5: case 6: case 7:
1535 return 0;
1536 default:
1537 goto bad_reg;
1538 }
1539 case 1:
1540 /* These registers aren't documented on arm11 cores. However
1541 Linux looks at them anyway. */
1542 if (!arm_feature(env, ARM_FEATURE_V6))
1543 goto bad_reg;
1544 if (crm != 0)
1545 goto bad_reg;
610c3c8a
AZ
1546 if (arm_feature(env, ARM_FEATURE_XSCALE))
1547 goto bad_reg;
b5ff1b31 1548 return 0;
9ee6e8bb
PB
1549 default:
1550 goto bad_reg;
b5ff1b31
FB
1551 }
1552 case 1: /* System configuration. */
c3d2689d
AZ
1553 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1554 op2 = 0;
b5ff1b31
FB
1555 switch (op2) {
1556 case 0: /* Control register. */
1557 return env->cp15.c1_sys;
1558 case 1: /* Auxiliary control register. */
c1713132 1559 if (arm_feature(env, ARM_FEATURE_XSCALE))
610c3c8a 1560 return env->cp15.c1_xscaleauxcr;
9ee6e8bb
PB
1561 if (!arm_feature(env, ARM_FEATURE_AUXCR))
1562 goto bad_reg;
1563 switch (ARM_CPUID(env)) {
1564 case ARM_CPUID_ARM1026:
1565 return 1;
1566 case ARM_CPUID_ARM1136:
1567 return 7;
1568 case ARM_CPUID_ARM11MPCORE:
1569 return 1;
1570 case ARM_CPUID_CORTEXA8:
1571 return 0;
1572 default:
1573 goto bad_reg;
1574 }
b5ff1b31 1575 case 2: /* Coprocessor access register. */
610c3c8a
AZ
1576 if (arm_feature(env, ARM_FEATURE_XSCALE))
1577 goto bad_reg;
b5ff1b31
FB
1578 return env->cp15.c1_coproc;
1579 default:
1580 goto bad_reg;
1581 }
ce819861
PB
1582 case 2: /* MMU Page table control / MPU cache control. */
1583 if (arm_feature(env, ARM_FEATURE_MPU)) {
1584 switch (op2) {
1585 case 0:
1586 return env->cp15.c2_data;
1587 break;
1588 case 1:
1589 return env->cp15.c2_insn;
1590 break;
1591 default:
1592 goto bad_reg;
1593 }
1594 } else {
9ee6e8bb
PB
1595 switch (op2) {
1596 case 0:
1597 return env->cp15.c2_base0;
1598 case 1:
1599 return env->cp15.c2_base1;
1600 case 2:
1601 {
1602 int n;
1603 uint32_t mask;
1604 n = 0;
1605 mask = env->cp15.c2_mask;
1606 while (mask) {
1607 n++;
1608 mask <<= 1;
1609 }
1610 return n;
1611 }
1612 default:
1613 goto bad_reg;
1614 }
1615 }
ce819861 1616 case 3: /* MMU Domain access control / MPU write buffer control. */
b5ff1b31
FB
1617 return env->cp15.c3;
1618 case 4: /* Reserved. */
1619 goto bad_reg;
ce819861 1620 case 5: /* MMU Fault status / MPU access permission. */
c3d2689d
AZ
1621 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1622 op2 = 0;
b5ff1b31
FB
1623 switch (op2) {
1624 case 0:
ce819861
PB
1625 if (arm_feature(env, ARM_FEATURE_MPU))
1626 return simple_mpu_ap_bits(env->cp15.c5_data);
b5ff1b31
FB
1627 return env->cp15.c5_data;
1628 case 1:
ce819861
PB
1629 if (arm_feature(env, ARM_FEATURE_MPU))
 1630            return simple_mpu_ap_bits(env->cp15.c5_insn);
1631 return env->cp15.c5_insn;
1632 case 2:
1633 if (!arm_feature(env, ARM_FEATURE_MPU))
1634 goto bad_reg;
1635 return env->cp15.c5_data;
1636 case 3:
1637 if (!arm_feature(env, ARM_FEATURE_MPU))
1638 goto bad_reg;
b5ff1b31
FB
1639 return env->cp15.c5_insn;
1640 default:
1641 goto bad_reg;
1642 }
9ee6e8bb 1643 case 6: /* MMU Fault address. */
ce819861 1644 if (arm_feature(env, ARM_FEATURE_MPU)) {
9ee6e8bb 1645 if (crm >= 8)
ce819861 1646 goto bad_reg;
9ee6e8bb 1647 return env->cp15.c6_region[crm];
ce819861 1648 } else {
c3d2689d
AZ
1649 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1650 op2 = 0;
9ee6e8bb
PB
1651 switch (op2) {
1652 case 0:
1653 return env->cp15.c6_data;
1654 case 1:
1655 if (arm_feature(env, ARM_FEATURE_V6)) {
 1656                /* Watchpoint Fault Address.  */
1657 return 0; /* Not implemented. */
1658 } else {
 1659                /* Instruction Fault Address.  */
1660 /* Arm9 doesn't have an IFAR, but implementing it anyway
1661 shouldn't do any harm. */
1662 return env->cp15.c6_insn;
1663 }
1664 case 2:
1665 if (arm_feature(env, ARM_FEATURE_V6)) {
 1666                /* Instruction Fault Address.  */
1667 return env->cp15.c6_insn;
1668 } else {
1669 goto bad_reg;
1670 }
1671 default:
1672 goto bad_reg;
1673 }
b5ff1b31
FB
1674 }
1675 case 7: /* Cache control. */
 1676        /* ??? This is for test, clean and invalidate operations that set the
c1713132 1677 Z flag. We can't represent N = Z = 1, so it also clears
b5ff1b31
FB
1678 the N flag. Oh well. */
1679 env->NZF = 0;
1680 return 0;
1681 case 8: /* MMU TLB control. */
1682 goto bad_reg;
1683 case 9: /* Cache lockdown. */
9ee6e8bb
PB
1684 switch (op1) {
1685 case 0: /* L1 cache. */
1686 if (arm_feature(env, ARM_FEATURE_OMAPCP))
1687 return 0;
1688 switch (op2) {
1689 case 0:
1690 return env->cp15.c9_data;
1691 case 1:
1692 return env->cp15.c9_insn;
1693 default:
1694 goto bad_reg;
1695 }
1696 case 1: /* L2 cache */
1697 if (crm != 0)
1698 goto bad_reg;
1699 /* L2 Lockdown and Auxiliary control. */
c3d2689d 1700 return 0;
b5ff1b31
FB
1701 default:
1702 goto bad_reg;
1703 }
1704 case 10: /* MMU TLB lockdown. */
1705 /* ??? TLB lockdown not implemented. */
1706 return 0;
1707 case 11: /* TCM DMA control. */
1708 case 12: /* Reserved. */
1709 goto bad_reg;
1710 case 13: /* Process ID. */
1711 switch (op2) {
1712 case 0:
1713 return env->cp15.c13_fcse;
1714 case 1:
1715 return env->cp15.c13_context;
9ee6e8bb
PB
1716 case 2:
1717 return env->cp15.c13_tls1;
1718 case 3:
1719 return env->cp15.c13_tls2;
1720 case 4:
1721 return env->cp15.c13_tls3;
b5ff1b31
FB
1722 default:
1723 goto bad_reg;
1724 }
1725 case 14: /* Reserved. */
1726 goto bad_reg;
1727 case 15: /* Implementation specific. */
c1713132 1728 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
c3d2689d 1729 if (op2 == 0 && crm == 1)
c1713132
AZ
1730 return env->cp15.c15_cpar;
1731
1732 goto bad_reg;
1733 }
c3d2689d
AZ
1734 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1735 switch (crm) {
1736 case 0:
1737 return 0;
1738 case 1: /* Read TI925T configuration. */
1739 return env->cp15.c15_ticonfig;
1740 case 2: /* Read I_max. */
1741 return env->cp15.c15_i_max;
1742 case 3: /* Read I_min. */
1743 return env->cp15.c15_i_min;
1744 case 4: /* Read thread-ID. */
1745 return env->cp15.c15_threadid;
1746 case 8: /* TI925T_status */
1747 return 0;
1748 }
1749 goto bad_reg;
1750 }
b5ff1b31
FB
1751 return 0;
1752 }
1753bad_reg:
1754 /* ??? For debugging only. Should raise illegal instruction exception. */
9ee6e8bb
PB
1755 cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
1756 (insn >> 16) & 0xf, crm, op1, op2);
b5ff1b31
FB
1757 return 0;
1758}
1759
9ee6e8bb
PB
1760void helper_set_r13_banked(CPUState *env, int mode, uint32_t val)
1761{
1762 env->banked_r13[bank_number(mode)] = val;
1763}
1764
1765uint32_t helper_get_r13_banked(CPUState *env, int mode)
1766{
1767 return env->banked_r13[bank_number(mode)];
1768}
1769
1770uint32_t helper_v7m_mrs(CPUState *env, int reg)
1771{
1772 switch (reg) {
1773 case 0: /* APSR */
1774 return xpsr_read(env) & 0xf8000000;
1775 case 1: /* IAPSR */
1776 return xpsr_read(env) & 0xf80001ff;
1777 case 2: /* EAPSR */
1778 return xpsr_read(env) & 0xff00fc00;
1779 case 3: /* xPSR */
1780 return xpsr_read(env) & 0xff00fdff;
1781 case 5: /* IPSR */
1782 return xpsr_read(env) & 0x000001ff;
1783 case 6: /* EPSR */
1784 return xpsr_read(env) & 0x0700fc00;
1785 case 7: /* IEPSR */
1786 return xpsr_read(env) & 0x0700edff;
1787 case 8: /* MSP */
1788 return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
1789 case 9: /* PSP */
1790 return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
1791 case 16: /* PRIMASK */
1792 return (env->uncached_cpsr & CPSR_I) != 0;
1793 case 17: /* FAULTMASK */
1794 return (env->uncached_cpsr & CPSR_F) != 0;
1795 case 18: /* BASEPRI */
1796 case 19: /* BASEPRI_MAX */
1797 return env->v7m.basepri;
1798 case 20: /* CONTROL */
1799 return env->v7m.control;
1800 default:
1801 /* ??? For debugging only. */
1802 cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
1803 return 0;
1804 }
1805}
1806
1807void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
1808{
1809 switch (reg) {
1810 case 0: /* APSR */
1811 xpsr_write(env, val, 0xf8000000);
1812 break;
1813 case 1: /* IAPSR */
1814 xpsr_write(env, val, 0xf8000000);
1815 break;
1816 case 2: /* EAPSR */
1817 xpsr_write(env, val, 0xfe00fc00);
1818 break;
1819 case 3: /* xPSR */
1820 xpsr_write(env, val, 0xfe00fc00);
1821 break;
1822 case 5: /* IPSR */
1823 /* IPSR bits are readonly. */
1824 break;
1825 case 6: /* EPSR */
1826 xpsr_write(env, val, 0x0600fc00);
1827 break;
1828 case 7: /* IEPSR */
1829 xpsr_write(env, val, 0x0600fc00);
1830 break;
1831 case 8: /* MSP */
1832 if (env->v7m.current_sp)
1833 env->v7m.other_sp = val;
1834 else
1835 env->regs[13] = val;
1836 break;
1837 case 9: /* PSP */
1838 if (env->v7m.current_sp)
1839 env->regs[13] = val;
1840 else
1841 env->v7m.other_sp = val;
1842 break;
1843 case 16: /* PRIMASK */
1844 if (val & 1)
1845 env->uncached_cpsr |= CPSR_I;
1846 else
1847 env->uncached_cpsr &= ~CPSR_I;
1848 break;
1849 case 17: /* FAULTMASK */
1850 if (val & 1)
1851 env->uncached_cpsr |= CPSR_F;
1852 else
1853 env->uncached_cpsr &= ~CPSR_F;
1854 break;
1855 case 18: /* BASEPRI */
1856 env->v7m.basepri = val & 0xff;
1857 break;
1858 case 19: /* BASEPRI_MAX */
1859 val &= 0xff;
1860 if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
1861 env->v7m.basepri = val;
1862 break;
1863 case 20: /* CONTROL */
1864 env->v7m.control = val & 3;
1865 switch_v7m_sp(env, (val & 2) != 0);
1866 break;
1867 default:
1868 /* ??? For debugging only. */
1869 cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
1870 return;
1871 }
1872}
1873
c1713132
AZ
1874void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
1875 ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
1876 void *opaque)
1877{
1878 if (cpnum < 0 || cpnum > 14) {
1879 cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
1880 return;
1881 }
1882
1883 env->cp[cpnum].cp_read = cp_read;
1884 env->cp[cpnum].cp_write = cp_write;
1885 env->cp[cpnum].opaque = opaque;
1886}
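
/* Hedged usage sketch (added; my_cp6_read/my_cp6_write/my_dev are
   hypothetical, and the ARMReadCPFunc/ARMWriteCPFunc signatures are assumed
   to match the calls made by helper_get_cp/helper_set_cp above): a device
   model would register its coprocessor hooks roughly like this. */
static uint32_t my_cp6_read(void *opaque, int cp_info, int dstreg, int operand)
{
    /* Decode cp_info/operand and return the requested register value. */
    return 0;
}

static void my_cp6_write(void *opaque, int cp_info, int srcreg, int operand,
                         uint32_t value)
{
    /* Latch the value into the device state pointed to by opaque. */
}

static void my_device_init_cp(CPUARMState *env, void *my_dev)
{
    cpu_arm_set_cp_io(env, 6, my_cp6_read, my_cp6_write, my_dev);
}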
1887
b5ff1b31 1888#endif
b26eefb6 1889