/* ARM helper routines -- target-arm/helper.c */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "exec-all.h"

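/* Reset the CPU.  For user mode emulation we come up in USR mode with the
   VFP enabled (FPEXC.EN is bit 30); for system emulation we come up in SVC
   mode with IRQ, FIQ and imprecise aborts masked, as on a real reset.  */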
void cpu_reset(CPUARMState *env)
{
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    env->regs[15] = 0;
}

CPUARMState *cpu_arm_init(void)
{
    CPUARMState *env;

    env = qemu_mallocz(sizeof(CPUARMState));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    cpu_reset(env);
    tlb_flush(env, 1);
    return env;
}

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}

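/* Select the CPU model.  The FPSID values written below encode the VFP
   implementer (0x41 is ARM) together with the subarchitecture, part number
   and revision for each core.  */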
void cpu_arm_set_model(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}

void cpu_arm_close(CPUARMState *env)
{
    free(env);
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int is_user, int is_softmmu)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

/* These should probably raise undefined insn exceptions.  */
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
    return 0;
}

void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

#else

extern int semihosting_enabled;

/* Map CPU modes onto saved register banks.  */
static inline int bank_number (int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
    return -1;
}

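/* Switch between CPU modes.  Each privileged mode has its own banked r13,
   r14 and SPSR; FIQ mode additionally banks r8-r12, which is why five extra
   registers are copied when entering or leaving FIQ.  */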
void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

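/* Vector table layout used below: 0x04 undefined instruction, 0x08 SWI,
   0x0c prefetch abort, 0x10 data abort, 0x18 IRQ, 0x1c FIQ.  */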
/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
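            /* 0x123456 is the semihosting SWI number used in ARM state and
               0xab the one used in Thumb state, as defined by the ARM
               semihosting interface.  */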
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_BKPT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors: the V bit (bit 13) of the control register relocates
       the vector table to 0xffff0000.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Switch to the new mode, and switch to Arm mode.  */
    /* ??? Thumb interrupt handlers not implemented.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    env->thumb = 0;
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
    if (domain == 3)
        return PAGE_READ | PAGE_WRITE;

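    /* For AP == 0 the result depends on the S and R bits of the CP15
       control register (bits 8 and 9): S gives privileged read-only access,
       R gives read-only access in any mode.  */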
    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return (access_type == 1) ? 0 : PAGE_READ;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    default:
        abort();
    }
}

static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
                         int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE;
    } else {
        /* Page table walk.  */
        /* Lookup l1 descriptor.  */
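        /* The level 1 table is 16KB aligned (translation table base bits
           [31:14]) and indexed by bits [31:20] of the virtual address, four
           bytes per entry.  */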
        table = (env->cp15.c2 & 0xffffc000) | ((address >> 18) & 0x3ffc);
        desc = ldl_phys(table);
        type = (desc & 3);
        domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
        if (type == 0) {
            /* Section translation fault.  */
            code = 5;
            goto do_fault;
        }
        if (domain == 0 || domain == 2) {
            if (type == 2)
                code = 9; /* Section domain fault.  */
            else
                code = 11; /* Page domain fault.  */
            goto do_fault;
        }
        if (type == 2) {
            /* 1MB section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            ap = (desc >> 10) & 3;
            code = 13;
        } else {
            /* Lookup l2 entry.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
            desc = ldl_phys(table);
            switch (desc & 3) {
            case 0: /* Page translation fault.  */
                code = 7;
                goto do_fault;
            case 1: /* 64k page.  */
                phys_addr = (desc & 0xffff0000) | (address & 0xffff);
                ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
                break;
            case 2: /* 4k page.  */
                phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
                break;
            case 3: /* 1k page.  */
                if (type == 1) {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                ap = (desc >> 4) & 3;
                break;
            default:
                /* Never happens, but compiler isn't smart enough to tell.  */
                abort();
            }
            code = 15;
        }
        *prot = check_ap(env, ap, domain, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        *phys_ptr = phys_addr;
    }
    return 0;
do_fault:
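    /* The return value is in the format of the c5 fault status register:
       fault status in the low four bits, domain in bits [7:4].  */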
    return code | (domain << 4);
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int is_user, int is_softmmu)
{
    uint32_t phys_addr;
    int prot;
    int ret;

    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        return tlb_set_page (env, address, phys_addr, prot, is_user,
                             is_softmmu);
    }

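    /* Translation failed: record the fault in the instruction or data
       fault status/address registers (c5/c6) and raise the corresponding
       abort exception.  */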
    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);

    if (ret != 0)
        return -1;

    return phys_addr;
}

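/* Write a cp15 register.  The insn argument is the original MCR instruction:
   this model only decodes CRn (bits [19:16]) and opcode_2 (bits [7:5]).  */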
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
    uint32_t op2;

    op2 = (insn >> 5) & 7;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        goto bad_reg;
    case 1: /* System configuration.  */
        switch (op2) {
        case 0:
            env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 2:
            env->cp15.c1_coproc = val;
            /* ??? Is this safe when called from within a TB?  */
            tb_flush(env);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control.  */
        env->cp15.c2 = val;
        break;
    case 3: /* MMU Domain access control.  */
        env->cp15.c3 = val;
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status.  */
        switch (op2) {
        case 0:
            env->cp15.c5_data = val;
            break;
        case 1:
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address.  */
        switch (op2) {
        case 0:
            env->cp15.c6_data = val;
            break;
        case 1:
            env->cp15.c6_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 7: /* Cache control.  */
        /* No cache, so nothing to do.  */
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
#if 0
            /* ??? This is wrong for large pages and sections.  */
            /* As an ugly hack to make linux work we always flush the
               whole 4K page.  */
            val &= 0xfffff000;
            tlb_flush_page(env, val);
            tlb_flush_page(env, val + 0x400);
            tlb_flush_page(env, val + 0x800);
            tlb_flush_page(env, val + 0xc00);
#else
            tlb_flush(env, 1);
#endif
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9: /* Cache lockdown.  */
        switch (op2) {
        case 0:
            env->cp15.c9_data = val;
            break;
        case 1:
            env->cp15.c9_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.  */
            if (env->cp15.c13_fcse != val)
                tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val)
                tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        /* ??? Internal registers not implemented.  */
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write\n");
}

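/* Read a cp15 register.  As for helper_set_cp15, only CRn (bits [19:16])
   and opcode_2 (bits [7:5]) of the original MRC instruction are decoded.  */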
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
    uint32_t op2;

    op2 = (insn >> 5) & 7;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        switch (op2) {
        default: /* Device ID.  */
            return env->cp15.c0_cpuid;
        case 1: /* Cache Type.  */
            return 0x1dd20d2;
        case 2: /* TCM status.  */
            return 0;
        }
    case 1: /* System configuration.  */
        switch (op2) {
        case 0: /* Control register.  */
            return env->cp15.c1_sys;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_AUXCR))
                return 1;
            goto bad_reg;
        case 2: /* Coprocessor access register.  */
            return env->cp15.c1_coproc;
        default:
            goto bad_reg;
        }
    case 2: /* MMU Page table control.  */
        return env->cp15.c2;
    case 3: /* MMU Domain access control.  */
        return env->cp15.c3;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status.  */
        switch (op2) {
        case 0:
            return env->cp15.c5_data;
        case 1:
            return env->cp15.c5_insn;
        default:
            goto bad_reg;
        }
    case 6: /* MMU Fault address.  */
        switch (op2) {
        case 0:
            return env->cp15.c6_data;
        case 1:
            /* Arm9 doesn't have an IFAR, but implementing it anyway shouldn't
               do any harm.  */
            return env->cp15.c6_insn;
        default:
            goto bad_reg;
        }
    case 7: /* Cache control.  */
        /* ??? This is for test, clean and invalidate operations that set the
           Z flag.  We can't represent N = Z = 1, so it also clears the
           N flag.  Oh well.  */
        env->NZF = 0;
        return 0;
    case 8: /* MMU TLB control.  */
        goto bad_reg;
    case 9: /* Cache lockdown.  */
        switch (op2) {
        case 0:
            return env->cp15.c9_data;
        case 1:
            return env->cp15.c9_insn;
        default:
            goto bad_reg;
        }
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        return 0;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            return env->cp15.c13_fcse;
        case 1:
            return env->cp15.c13_context;
        default:
            goto bad_reg;
        }
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        /* ??? Internal registers not implemented.  */
        return 0;
    }
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register read\n");
    return 0;
}

#endif