/*
 * sparc helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

/* Sparc MMU emulation */

/* thread support */

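/* Global lock taken by cpu_lock()/cpu_unlock() below; presumably used by
   helpers that must execute atomically with respect to other virtual CPUs
   (e.g. ldstub/swap style accesses). */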
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

#if defined(CONFIG_USER_ONLY)

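/* In user-mode emulation there is no software MMU: a faulting access is
   simply reported back to the caller as a text or data fault, which is then
   presumably turned into a signal for the guest process. */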
int cpu_sparc_handle_mmu_fault(CPUState *env, target_ulong address, int rw,
                               int is_user, int is_softmmu)
{
    if (rw & 2)
        env->exception_index = TT_TFAULT;
    else
        env->exception_index = TT_DFAULT;
    return 1;
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
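/* access_table is indexed by [access_index][ACC field of the PTE]; a zero
   entry means the access is allowed, a non-zero entry is returned by
   get_physical_address() as the error code (the values follow the SRMMU
   fault-type encoding: 2 = protection error, 3 = privilege violation).
   rw_table[is_user][ACC] is 1 when writes are permitted. */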
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 2, 0, 3, 3 },
    { 0, 0, 0, 0, 2, 0, 0, 0 },
    { 2, 2, 0, 0, 0, 2, 3, 3 },
    { 2, 2, 0, 0, 0, 2, 0, 0 },
    { 2, 0, 2, 0, 2, 2, 3, 3 },
    { 2, 0, 2, 0, 2, 0, 2, 0 },
    { 2, 2, 2, 0, 2, 2, 3, 3 },
    { 2, 2, 2, 0, 2, 2, 2, 0 }
};

/* 1 = write OK */
static const int rw_table[2][8] = {
    { 0, 1, 0, 1, 0, 1, 0, 1 },
    { 0, 1, 0, 1, 0, 0, 0, 0 }
};

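/* Walk the SRMMU page tables for 'address'.  On success the physical address
   and the allowed protection bits are returned through 'physical' and 'prot'
   and the function returns 0.  A non-zero return value is the error code:
   either an entry from access_table above, or a fault-status style value
   with the table-walk level in bits 9:8 and the fault type in bits 4:2
   (1 = invalid address, 4 = translation/reserved error). */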
int get_physical_address (CPUState *env, target_phys_addr_t *physical, int *prot,
                          int *access_index, target_ulong address, int rw,
                          int is_user)
{
    int access_perms = 0;
    target_phys_addr_t pde_ptr;
    uint32_t pde;
    target_ulong virt_addr;
    int error_code = 0, is_dirty;
    unsigned long page_offset;

    virt_addr = address & TARGET_PAGE_MASK;
    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

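    /* access_index bit layout: bit 2 = write access, bit 1 = instruction
       fetch (rw == 2), bit 0 = supervisor mode; it selects a row of
       access_table above. */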
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1);
    *physical = 0xfffff000;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
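    /* A 32-bit virtual address is split as VA[31:24] = L1 index (256
       entries), VA[23:18] = L2 index (64 entries), VA[17:12] = L3 index
       (64 entries) and VA[11:0] = page offset.  A PDE stores a page table
       pointer in bits 31:2, so '(pde & ~3) << 4' recovers the physical
       base of the next-level table. */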
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    virt_addr = address & TARGET_PAGE_MASK;
                    page_offset = (address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1);
                }
                break;
            case 2: /* L2 PTE */
                virt_addr = address & ~0x3ffff;
                page_offset = address & 0x3ffff;
            }
            break;
        case 2: /* L1 PTE */
            virt_addr = address & ~0xffffff;
            page_offset = address & 0xffffff;
        }
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty)
            pde |= PG_MODIFIED_MASK;
        stl_phys_notdirty(pde_ptr, pde);
    }
    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !(env->mmuregs[0] & MMU_NF))
        return error_code;

    /* the page can be put in the TLB */
    *prot = PAGE_READ;
    if (pde & PG_MODIFIED_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (rw_table[is_user][access_perms])
            *prot |= PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int is_user, int is_softmmu)
{
    target_ulong virt_addr;
    target_phys_addr_t paddr;
    unsigned long vaddr;
    int error_code = 0, prot, ret = 0, access_index;

    error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, is_user);
    if (error_code == 0) {
        virt_addr = address & TARGET_PAGE_MASK;
        vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1));
        ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
        return ret;
    }

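    /* mmuregs[3] is the SRMMU fault status register.  The layout assumed
       here follows the SPARC V8 reference MMU: access type in bits 7:5,
       fault type in bits 4:2, FAV (fault address valid) in bit 1 and
       OW (overwrite) in bit 0. */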
    if (env->mmuregs[3]) /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        // No fault mode: if a mapping is available, just override
        // permissions. If no mapping is available, redirect accesses to
        // neverland. Fake/overridden mappings will be flushed when
        // switching to normal mode.
        vaddr = address & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
        return ret;
    } else {
        if (rw & 2)
            env->exception_index = TT_TFAULT;
        else
            env->exception_index = TT_DFAULT;
        return 1;
    }
}
#else
/*
 * UltraSparc IIi I/DMMUs
 */
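/* Software TLB lookup.  The entry layout assumed by this code: the tag word
   holds the virtual address in bits 63:13 and the context in bits 12:0; the
   TTE data word has V (valid) in bit 63, the page size in bits 62:61, the
   physical address in bits 40:13, L (locked) in bit 6, P (privileged) in
   bit 2 and W (writable) in bit 1. */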
static int get_physical_address_data(CPUState *env, target_phys_addr_t *physical, int *prot,
                                     int *access_index, target_ulong address, int rw,
                                     int is_user)
{
    target_ulong mask;
    unsigned int i;

    if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    for (i = 0; i < 64; i++) {
        switch ((env->dtlb_tte[i] >> 61) & 3) {
        default:
        case 0x0: // 8k
            mask = 0xffffffffffffe000ULL;
            break;
        case 0x1: // 64k
            mask = 0xffffffffffff0000ULL;
            break;
        case 0x2: // 512k
            mask = 0xfffffffffff80000ULL;
            break;
        case 0x3: // 4M
            mask = 0xffffffffffc00000ULL;
            break;
        }
        // ctx match, vaddr match?
        if (env->dmmuregs[1] == (env->dtlb_tag[i] & 0x1fff) &&
            (address & mask) == (env->dtlb_tag[i] & ~0x1fffULL)) {
            // valid, access ok?
            if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0 ||
                ((env->dtlb_tte[i] & 0x4) && is_user) ||
                (!(env->dtlb_tte[i] & 0x2) && (rw == 1))) {
                if (env->dmmuregs[3]) /* Fault status register */
                    env->dmmuregs[3] = 2; /* overflow (not read before another fault) */
                env->dmmuregs[3] |= (is_user << 3) | ((rw == 1) << 2) | 1;
                env->dmmuregs[4] = address; /* Fault address register */
                env->exception_index = TT_DFAULT;
#ifdef DEBUG_MMU
                printf("DFAULT at 0x%llx\n", address);
#endif
                return 1;
            }
            *physical = (env->dtlb_tte[i] & mask & 0x1fffffff000ULL) + (address & ~mask & 0x1fffffff000ULL);
            *prot = PAGE_READ;
            if (env->dtlb_tte[i] & 0x2)
                *prot |= PAGE_WRITE;
            return 0;
        }
    }
#ifdef DEBUG_MMU
    printf("DMISS at 0x%llx\n", address);
#endif
    env->exception_index = TT_DMISS;
    return 1;
}

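/* Same lookup for the IMMU: only the valid and privileged bits are checked
   since there is no write permission to test for an instruction fetch, and
   the tag compare uses the same context value (dmmuregs[1]) as the DMMU
   path. */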
static int get_physical_address_code(CPUState *env, target_phys_addr_t *physical, int *prot,
                                     int *access_index, target_ulong address, int rw,
                                     int is_user)
{
    target_ulong mask;
    unsigned int i;

    if ((env->lsu & IMMU_E) == 0) { /* IMMU disabled */
        *physical = address;
        *prot = PAGE_READ;
        return 0;
    }

    for (i = 0; i < 64; i++) {
        switch ((env->itlb_tte[i] >> 61) & 3) {
        default:
        case 0x0: // 8k
            mask = 0xffffffffffffe000ULL;
            break;
        case 0x1: // 64k
            mask = 0xffffffffffff0000ULL;
            break;
        case 0x2: // 512k
            mask = 0xfffffffffff80000ULL;
            break;
        case 0x3: // 4M
            mask = 0xffffffffffc00000ULL;
            break;
        }
        // ctx match, vaddr match?
        if (env->dmmuregs[1] == (env->itlb_tag[i] & 0x1fff) &&
            (address & mask) == (env->itlb_tag[i] & ~0x1fffULL)) {
            // valid, access ok?
            if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0 ||
                ((env->itlb_tte[i] & 0x4) && is_user)) {
                if (env->immuregs[3]) /* Fault status register */
                    env->immuregs[3] = 2; /* overflow (not read before another fault) */
                env->immuregs[3] |= (is_user << 3) | 1;
                env->exception_index = TT_TFAULT;
#ifdef DEBUG_MMU
                printf("TFAULT at 0x%llx\n", address);
#endif
                return 1;
            }
            *physical = (env->itlb_tte[i] & mask & 0x1fffffff000ULL) + (address & ~mask & 0x1fffffff000ULL);
            *prot = PAGE_READ;
            return 0;
        }
    }
#ifdef DEBUG_MMU
    printf("TMISS at 0x%llx\n", address);
#endif
    env->exception_index = TT_TMISS;
    return 1;
}

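/* rw encodes the access type: 0 = data load, 1 = data store, 2 = instruction
   fetch, so code fetches go through the IMMU and everything else through the
   DMMU. */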
int get_physical_address(CPUState *env, target_phys_addr_t *physical, int *prot,
                         int *access_index, target_ulong address, int rw,
                         int is_user)
{
    if (rw == 2)
        return get_physical_address_code(env, physical, prot, access_index, address, rw, is_user);
    else
        return get_physical_address_data(env, physical, prot, access_index, address, rw, is_user);
}

/* Perform address translation */
int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int is_user, int is_softmmu)
{
    target_ulong virt_addr, vaddr;
    target_phys_addr_t paddr;
    int error_code = 0, prot, ret = 0, access_index;

    error_code = get_physical_address(env, &paddr, &prot, &access_index, address, rw, is_user);
    if (error_code == 0) {
        virt_addr = address & TARGET_PAGE_MASK;
        vaddr = virt_addr + ((address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1));
#ifdef DEBUG_MMU
        printf("Translate at 0x%llx -> 0x%llx, vaddr 0x%llx\n", address, paddr, vaddr);
#endif
        ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
        return ret;
    }
    // XXX
    return 1;
}

#endif
#endif

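/* Copy a block of eight registers, e.g. one set of globals or one half of a
   register window; presumably used by the window save/restore and trap
   handling code. */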
void memcpy32(target_ulong *dst, const target_ulong *src)
{
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
    dst[3] = src[3];
    dst[4] = src[4];
    dst[5] = src[5];
    dst[6] = src[6];
    dst[7] = src[7];
}

#if !defined(TARGET_SPARC64)
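/* Walk the page tables like get_physical_address(), but stop at the level
   selected by 'mmulev' (3 = return the context-table entry, 2 = the level-1
   entry, 1 = the level-2 entry, 0 = walk down to the level-3 PTE) and return
   the raw entry, or 0 if the walk reaches an invalid or reserved entry;
   presumably backing the SRMMU probe ASI accesses. */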
target_ulong mmu_probe(CPUState *env, target_ulong address, int mmulev)
{
    target_phys_addr_t pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3)
            return pde;
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2)
                return pde;
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1)
                    return pde;
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

#ifdef DEBUG_MMU
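/* Dump every valid mapping by probing the page tables over the whole 4 GB
   address space: 256 x 16 MB regions, then 64 x 256 KB segments, then
   64 x 4 KB pages. */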
void dump_mmu(CPUState *env)
{
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    target_phys_addr_t pde_ptr, pa;
    uint32_t pde;

    printf("MMU dump:\n");
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);
    printf("Root ptr: " TARGET_FMT_lx ", ctx: %d\n", env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde_ptr = mmu_probe(env, va, 2);
        if (pde_ptr) {
            pa = cpu_get_phys_page_debug(env, va);
            printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_lx " PDE: " TARGET_FMT_lx "\n", va, pa, pde_ptr);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde_ptr = mmu_probe(env, va1, 1);
                if (pde_ptr) {
                    pa = cpu_get_phys_page_debug(env, va1);
                    printf(" VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_lx " PDE: " TARGET_FMT_lx "\n", va1, pa, pde_ptr);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde_ptr = mmu_probe(env, va2, 0);
                        if (pde_ptr) {
                            pa = cpu_get_phys_page_debug(env, va2);
                            printf(" VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_lx " PTE: " TARGET_FMT_lx "\n", va2, pa, pde_ptr);
                        }
                    }
                }
            }
        }
    }
    printf("MMU dump ends\n");
}
#endif
#else
#ifdef DEBUG_MMU
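/* Print the raw contents of both 64-entry software TLBs; only entries with
   the valid bit set are shown. */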
void dump_mmu(CPUState *env)
{
    unsigned int i;
    const char *mask;

    printf("MMU contexts: Primary: %lld, Secondary: %lld\n", env->dmmuregs[1], env->dmmuregs[2]);
    if ((env->lsu & DMMU_E) == 0) {
        printf("DMMU disabled\n");
    } else {
        printf("DMMU dump:\n");
        for (i = 0; i < 64; i++) {
            switch ((env->dtlb_tte[i] >> 61) & 3) {
            default:
            case 0x0:
                mask = " 8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = " 4M";
                break;
            }
            if ((env->dtlb_tte[i] & 0x8000000000000000ULL) != 0) {
                printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_lx ", %s, %s, %s, %s, ctx %lld\n",
                       env->dtlb_tag[i] & ~0x1fffULL,
                       env->dtlb_tte[i] & 0x1ffffffe000ULL,
                       mask,
                       env->dtlb_tte[i] & 0x4? "priv": "user",
                       env->dtlb_tte[i] & 0x2? "RW": "RO",
                       env->dtlb_tte[i] & 0x40? "locked": "unlocked",
                       env->dtlb_tag[i] & 0x1fffULL);
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        printf("IMMU disabled\n");
    } else {
        printf("IMMU dump:\n");
        for (i = 0; i < 64; i++) {
            switch ((env->itlb_tte[i] >> 61) & 3) {
            default:
            case 0x0:
                mask = " 8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = " 4M";
                break;
            }
            if ((env->itlb_tte[i] & 0x8000000000000000ULL) != 0) {
                printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_lx ", %s, %s, %s, ctx %lld\n",
                       env->itlb_tag[i] & ~0x1fffULL,
                       env->itlb_tte[i] & 0x1ffffffe000ULL,
                       mask,
                       env->itlb_tte[i] & 0x4? "priv": "user",
                       env->itlb_tte[i] & 0x40? "locked": "unlocked",
                       env->itlb_tag[i] & 0x1fffULL);
            }
        }
    }
}
#endif
#endif