4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 /* Sparc MMU emulation */
26 int cpu_sparc_handle_mmu_fault (CPUState
*env
, uint32_t address
, int rw
,
27 int is_user
, int is_softmmu
);
31 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
/* Acquire the global CPU spin lock.  NOTE(review): the enclosing function
   definition (presumably cpu_lock) is missing from this chunk. */
35 spin_lock(&global_cpu_lock
);
/* Release the global CPU spin lock.  NOTE(review): the enclosing function
   definition (presumably cpu_unlock) is missing from this chunk. */
40 spin_unlock(&global_cpu_lock
);
43 #if !defined(CONFIG_USER_ONLY)
/* Generate the soft-MMU load/store helpers from the shared template,
   one inclusion per access size.  NOTE(review): the "#define SHIFT n"
   lines that normally precede each inclusion are missing from this
   chunk (original lines 48, 51, 54, 57). */
45 #define MMUSUFFIX _mmu
/* GETPC() captures the host return address so a fault inside generated
   code can be attributed to the faulting translated instruction. */
46 #define GETPC() (__builtin_return_address(0))
49 #include "softmmu_template.h"
52 #include "softmmu_template.h"
55 #include "softmmu_template.h"
58 #include "softmmu_template.h"
61 /* try to fill the TLB and return an exception if error. If retaddr is
62 NULL, it means that the function was called in C code (i.e. not
63 from generated code or from helper.c) */
64 /* XXX: fix it to restore all registers */
/* NOTE(review): this body is missing several original lines (opening
   brace, local declarations, the saved_env handling, the retaddr != NULL
   test and the TB lookup) — documented as-is; do not treat the fragment
   as complete. */
65 void tlb_fill(unsigned long addr
, int is_write
, int is_user
, void *retaddr
)
72 /* XXX: hack to restore env in all cases, even if not called from
/* Attempt the translation; on success this installs the TLB entry
   (last argument 1 = soft-MMU path). */
77 ret
= cpu_sparc_handle_mmu_fault(env
, addr
, is_write
, is_user
, 1);
80 /* now we have a real cpu fault */
81 pc
= (unsigned long)retaddr
;
84 /* the PC is inside the translated code. It means that we have
85 a virtual CPU fault */
/* Roll CPU state back to the faulting guest instruction so the guest
   observes a precise fault. */
86 cpu_restore_state(tb
, env
, pc
, NULL
);
/* Deliver the MMU fault to the guest; never returns here. */
89 raise_exception_err(ret
, env
->error_code
);
/* Permission-check table indexed as [access_index][access_perms]:
   0 = access allowed, non-zero = error code reported to the caller
   (used by get_physical_address below).  NOTE(review): the closing
   "};" line (original 104) is missing from this chunk. */
95 static const int access_table
[8][8] = {
96 { 0, 0, 0, 0, 2, 0, 3, 3 },
97 { 0, 0, 0, 0, 2, 0, 0, 0 },
98 { 2, 2, 0, 0, 0, 2, 3, 3 },
99 { 2, 2, 0, 0, 0, 2, 0, 0 },
100 { 2, 0, 2, 0, 2, 2, 3, 3 },
101 { 2, 0, 2, 0, 2, 0, 2, 0 },
102 { 2, 2, 2, 0, 2, 2, 3, 3 },
103 { 2, 2, 2, 0, 2, 2, 2, 0 }
/* Write-permission table indexed as [is_user][access_perms]; non-zero
   means write access may be granted early (see the PG_MODIFIED_MASK
   branch in get_physical_address).  NOTE(review): the closing "};" line
   is missing from this chunk. */
107 static const int rw_table
[2][8] = {
108 { 0, 1, 0, 1, 0, 1, 0, 1 },
109 { 0, 1, 0, 1, 0, 0, 0, 0 }
/* SPARC reference MMU table walk (Context table -> L1 -> L2 -> PTE).
   Outputs: *physical = translated address, *prot = page protection,
   *access_index = index into access_table.  Returns an error code
   (0 = OK) via error_code.  NOTE(review): this chunk is missing many
   original lines — the trailing "int is_user)" of the signature, the
   opening brace, and all break/return statements of the switch cases —
   so the visible text is a fragment, not a compilable function. */
112 int get_physical_address (CPUState
*env
, uint32_t *physical
, int *prot
,
113 int *access_index
, uint32_t address
, int rw
,
116 int access_perms
= 0;
117 target_phys_addr_t pde_ptr
;
118 uint32_t pde
, virt_addr
;
119 int error_code
= 0, is_dirty
;
120 unsigned long page_offset
;
122 virt_addr
= address
& TARGET_PAGE_MASK
;
/* MMU disabled: identity mapping with read/write permission. */
123 if ((env
->mmuregs
[0] & MMU_E
) == 0) { /* MMU disabled */
125 *prot
= PAGE_READ
| PAGE_WRITE
;
129 /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
130 /* Context base + context number */
/* mmuregs[1] presumably holds the context-table pointer and mmuregs[2]
   the current context number — TODO confirm against the CPU state
   definition; both are shifted left 4 to form a physical address. */
131 pde_ptr
= (env
->mmuregs
[1] << 4) + (env
->mmuregs
[2] << 4);
/* NOTE(review): pde is byte-swapped (bswap32) before the write-back
   below, but no swap is visible after any of the reads — the matching
   swap lines appear to be missing from this chunk. */
132 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
/* The two low bits of each table entry encode its type
   (PTE_ENTRYTYPE_MASK): invalid / PDE / PTE / reserved. */
136 switch (pde
& PTE_ENTRYTYPE_MASK
) {
138 case 0: /* Invalid */
140 case 2: /* L0 PTE, maybe should not happen? */
141 case 3: /* Reserved */
/* Level-1 descriptor: index by address bits [31:22]. */
144 pde_ptr
= ((address
>> 22) & ~3) + ((pde
& ~3) << 4);
145 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
148 switch (pde
& PTE_ENTRYTYPE_MASK
) {
150 case 0: /* Invalid */
152 case 3: /* Reserved */
/* Level-2 descriptor: index by address bits [23:18]. */
155 pde_ptr
= ((address
& 0xfc0000) >> 16) + ((pde
& ~3) << 4);
156 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
159 switch (pde
& PTE_ENTRYTYPE_MASK
) {
161 case 0: /* Invalid */
163 case 3: /* Reserved */
/* Level-3 descriptor: index by address bits [17:12]. */
166 pde_ptr
= ((address
& 0x3f000) >> 10) + ((pde
& ~3) << 4);
167 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
170 switch (pde
& PTE_ENTRYTYPE_MASK
) {
172 case 0: /* Invalid */
174 case 1: /* PDE, should not happen */
175 case 3: /* Reserved */
/* 4 KB page mapping. */
178 virt_addr
= address
& TARGET_PAGE_MASK
;
/* NOTE(review): (address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1)
   is identically zero — this likely should be
   address & ~TARGET_PAGE_MASK (i.e. the in-page offset); confirm
   against upstream before changing. */
179 page_offset
= (address
& TARGET_PAGE_MASK
) & (TARGET_PAGE_SIZE
- 1);
/* 256 KB (level-2) large-page mapping. */
183 virt_addr
= address
& ~0x3ffff;
184 page_offset
= address
& 0x3ffff;
/* 16 MB (level-1) large-page mapping. */
188 virt_addr
= address
& ~0xffffff;
189 page_offset
= address
& 0xffffff;
193 /* update page modified and dirty bits */
/* A write (rw & 1) to a page not yet marked modified must dirty it. */
194 is_dirty
= (rw
& 1) && !(pde
& PG_MODIFIED_MASK
);
195 if (!(pde
& PG_ACCESSED_MASK
) || is_dirty
) {
197 pde
|= PG_ACCESSED_MASK
;
199 pde
|= PG_MODIFIED_MASK
;
/* Write the updated entry back to guest memory, byte-swapped. */
200 tmppde
= bswap32(pde
);
201 cpu_physical_memory_write(pde_ptr
, (uint8_t *)&tmppde
, 4);
/* Build the access_table index from write bit, access-size bit and
   supervisor/user (is_user ? 0 : 1). */
204 *access_index
= ((rw
& 1) << 2) | (rw
& 2) | (is_user
? 0 : 1);
205 access_perms
= (pde
& PTE_ACCESS_MASK
) >> PTE_ACCESS_SHIFT
;
206 error_code
= access_table
[*access_index
][access_perms
];
210 /* the page can be put in the TLB */
212 if (pde
& PG_MODIFIED_MASK
) {
213 /* only set write access if already dirty... otherwise wait
215 if (rw_table
[is_user
][access_perms
])
219 /* Even if large ptes, we map only one 4KB page in the cache to
220 avoid filling it too fast */
221 *physical
= ((pde
& PTE_ADDR_MASK
) << 4) + page_offset
;
225 /* Perform address translation */
/* Translate ADDRESS via get_physical_address and, on success, install
   the mapping with tlb_set_page; on failure record the fault in the
   MMU fault status/address registers and raise the exception.
   NOTE(review): several original lines are missing from this chunk
   (opening brace, the declarations of vaddr and exception, the
   user-mode-only body, and the return statements). */
226 int cpu_sparc_handle_mmu_fault (CPUState
*env
, uint32_t address
, int rw
,
227 int is_user
, int is_softmmu
)
230 uint32_t virt_addr
, paddr
;
232 int error_code
= 0, prot
, ret
= 0, access_index
;
234 if (env
->user_mode_only
) {
235 /* user mode only emulation */
240 error_code
= get_physical_address(env
, &paddr
, &prot
, &access_index
, address
, rw
, is_user
);
241 if (error_code
== 0) {
242 virt_addr
= address
& TARGET_PAGE_MASK
;
/* NOTE(review): (address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1)
   is identically zero, so vaddr == virt_addr; same suspect expression
   as in get_physical_address — confirm against upstream. */
243 vaddr
= virt_addr
+ ((address
& TARGET_PAGE_MASK
) & (TARGET_PAGE_SIZE
- 1));
244 ret
= tlb_set_page(env
, vaddr
, paddr
, prot
, is_user
, is_softmmu
);
/* Translation failed: latch fault status.  A still-pending (unread)
   status marks the overflow bit first. */
248 if (env
->mmuregs
[3]) /* Fault status register */
249 env
->mmuregs
[3] = 1; /* overflow (not read before another fault) */
250 env
->mmuregs
[3] |= (access_index
<< 5) | (error_code
<< 2) | 2;
251 env
->mmuregs
[4] = address
; /* Fault address register */
/* No-fault mode (MMU_NF) or traps disabled (psret == 0): suppress
   the exception. */
253 if (env
->mmuregs
[0] & MMU_NF
|| env
->psret
== 0) // No fault
/* NOTE(review): "exception" is not declared/assigned in the visible
   lines — its computation is missing from this chunk. */
256 env
->exception_index
= exception
;
257 env
->error_code
= error_code
;
/* Presumably copies one 16-register window from src to dst (callers in
   set_cwp pass regbase offsets in multiples of 16) — the body is
   missing from this chunk, so confirm the copy length upstream. */
261 void memcpy32(uint32_t *dst
, const uint32_t *src
)
/* Switch the current register window pointer to new_cwp, shuffling the
   wrap-around window (index NWINDOWS - 1) between its canonical and
   temporary locations so regwptr indexing stays linear.
   NOTE(review): the assignment "env->cwp = new_cwp;" (original line
   278) is missing from this chunk. */
273 void set_cwp(int new_cwp
)
275 /* put the modified wrap registers at their proper location */
276 if (env
->cwp
== (NWINDOWS
- 1))
277 memcpy32(env
->regbase
, env
->regbase
+ NWINDOWS
* 16);
279 /* put the wrap registers at their temporary location */
280 if (new_cwp
== (NWINDOWS
- 1))
281 memcpy32(env
->regbase
+ NWINDOWS
* 16, env
->regbase
);
/* Point regwptr at the first register of the new window. */
282 env
->regwptr
= env
->regbase
+ (new_cwp
* 16);
286 * Begin execution of an interruption. is_int is TRUE if coming from
287 * the int instruction. next_eip is the EIP value AFTER the interrupt
288 * instruction. It is only relevant if is_int is TRUE.
/* NOTE(review): the comment above is x86-flavoured boilerplate (EIP,
   "int instruction") carried over into SPARC code; the is_int,
   error_code, next_eip and is_hw parameters are unused in the visible
   lines except for logging.  Several original lines are missing from
   this chunk: the opening brace and locals, the env->pc argument of
   the log fprintf (original line 300 — the format string expects it),
   "set_cwp(cwp);" (328), "env->psrs = 1;" (332) and
   "env->pc = env->tbr;" (334). */
290 void do_interrupt(int intno
, int is_int
, int error_code
,
291 unsigned int next_eip
, int is_hw
)
/* Optional interrupt logging when CPU_LOG_INT is enabled. */
296 if (loglevel
& CPU_LOG_INT
) {
298 fprintf(logfile
, "%6d: v=%02x e=%04x i=%d pc=%08x npc=%08x SP=%08x\n",
299 count
, intno
, error_code
, is_int
,
301 env
->npc
, env
->regwptr
[6]);
303 cpu_dump_state(env
, logfile
, fprintf
, 0);
/* Dump 16 bytes of guest code at the current PC. */
308 fprintf(logfile
, " code=");
309 ptr
= (uint8_t *)env
->pc
;
310 for(i
= 0; i
< 16; i
++) {
311 fprintf(logfile
, " %02x", ldub(ptr
+ i
));
313 fprintf(logfile
, "\n");
319 #if !defined(CONFIG_USER_ONLY)
/* A trap while traps are disabled puts a real SPARC in error state:
   emulate by shutting the machine down. */
320 if (env
->psret
== 0) {
321 fprintf(logfile
, "Trap while interrupts disabled, Error state!\n");
322 qemu_system_shutdown_request();
/* Rotate to the previous register window and save the return state
   in local registers l1/l2 (regwptr[9]/[10]). */
327 cwp
= (env
->cwp
- 1) & (NWINDOWS
- 1);
329 env
->regwptr
[9] = env
->pc
- 4; // XXX?
330 env
->regwptr
[10] = env
->pc
;
/* Save the supervisor bit into the previous-supervisor field. */
331 env
->psrps
= env
->psrs
;
/* Vector through the trap base register: tt field = intno << 4. */
333 env
->tbr
= (env
->tbr
& TBR_BASE_MASK
) | (intno
<< 4);
335 env
->npc
= env
->pc
+ 4;
336 env
->exception_index
= 0;
/* Raise an exception carrying an error code.  NOTE(review): error_code
   is unused in the visible lines — the assignment
   "env->error_code = error_code;" (original line 340) appears to be
   missing from this chunk. */
339 void raise_exception_err(int exception_index
, int error_code
)
341 raise_exception(exception_index
);
/* Probe the MMU tables for ADDRESS down to level mmulev and return the
   descriptor found there (used by dump_mmu below).  Mirrors the walk in
   get_physical_address.  NOTE(review): this chunk is missing the
   opening brace, the "uint32_t pde;" declaration, all return
   statements and the mmulev comparisons that terminate the walk. */
344 uint32_t mmu_probe(uint32_t address
, int mmulev
)
346 target_phys_addr_t pde_ptr
;
349 /* Context base + context number */
350 pde_ptr
= (env
->mmuregs
[1] << 4) + (env
->mmuregs
[2] << 4);
351 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
353 switch (pde
& PTE_ENTRYTYPE_MASK
) {
355 case 0: /* Invalid */
356 case 2: /* PTE, maybe should not happen? */
357 case 3: /* Reserved */
/* Level-1 descriptor: index by address bits [31:22]. */
362 pde_ptr
= ((address
>> 22) & ~3) + ((pde
& ~3) << 4);
363 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
366 switch (pde
& PTE_ENTRYTYPE_MASK
) {
368 case 0: /* Invalid */
369 case 3: /* Reserved */
/* Level-2 descriptor: index by address bits [23:18]. */
376 pde_ptr
= ((address
& 0xfc0000) >> 16) + ((pde
& ~3) << 4);
377 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
380 switch (pde
& PTE_ENTRYTYPE_MASK
) {
382 case 0: /* Invalid */
383 case 3: /* Reserved */
/* Level-3 descriptor: index by address bits [17:12]. */
390 pde_ptr
= ((address
& 0x3f000) >> 10) + ((pde
& ~3) << 4);
391 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
394 switch (pde
& PTE_ENTRYTYPE_MASK
) {
396 case 0: /* Invalid */
397 case 1: /* PDE, should not happen */
398 case 3: /* Reserved */
/* Debug dump of the full MMU translation: walks all 256 level-1 slots
   (16 MB each), 64 level-2 slots (256 KB) and 64 level-3 slots (4 KB),
   printing VA, PA and descriptor for each mapped entry.
   NOTE(review): the function header (presumably "void dump_mmu(void)"),
   its opening brace, the declarations of n/m/o and the guard
   conditions that skip unmapped entries are missing from this chunk. */
412 uint32_t pa
, va
, va1
, va2
;
414 target_phys_addr_t pde_ptr
;
417 printf("MMU dump:\n");
/* Context-table entry for the current context (mmuregs[1]/[2]). */
418 pde_ptr
= (env
->mmuregs
[1] << 4) + (env
->mmuregs
[2] << 4);
419 cpu_physical_memory_read(pde_ptr
, (uint8_t *)&pde
, 4);
421 printf("Root ptr: 0x%08x, ctx: %d\n", env
->mmuregs
[1] << 4, env
->mmuregs
[2]);
/* Level 1: 256 regions of 16 MB. */
422 for (n
= 0, va
= 0; n
< 256; n
++, va
+= 16 * 1024 * 1024) {
423 pde_ptr
= mmu_probe(va
, 2);
425 pa
= cpu_get_phys_page_debug(env
, va
);
426 printf("VA: 0x%08x, PA: 0x%08x PDE: 0x%08x\n", va
, pa
, pde_ptr
);
/* Level 2: 64 regions of 256 KB. */
427 for (m
= 0, va1
= va
; m
< 64; m
++, va1
+= 256 * 1024) {
428 pde_ptr
= mmu_probe(va1
, 1);
430 pa
= cpu_get_phys_page_debug(env
, va1
);
431 printf(" VA: 0x%08x, PA: 0x%08x PDE: 0x%08x\n", va1
, pa
, pde_ptr
);
/* Level 3: 64 pages of 4 KB. */
432 for (o
= 0, va2
= va1
; o
< 64; o
++, va2
+= 4 * 1024) {
433 pde_ptr
= mmu_probe(va2
, 0);
435 pa
= cpu_get_phys_page_debug(env
, va2
);
436 printf("    VA: 0x%08x, PA: 0x%08x PTE: 0x%08x\n", va2
, pa
, pde_ptr
);
443 printf("MMU dump ends\n");