target/sparc/ldst_helper.c (QEMU, mirror_qemu.git, blob at commit "target-sparc: implement UA2005 scratchpad registers")
1 /*
2 * Helpers for loads and stores
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "tcg.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "asi.h"
27
28 //#define DEBUG_MMU
29 //#define DEBUG_MXCC
30 //#define DEBUG_UNALIGNED
31 //#define DEBUG_UNASSIGNED
32 //#define DEBUG_ASI
33 //#define DEBUG_CACHE_CONTROL
34
35 #ifdef DEBUG_MMU
36 #define DPRINTF_MMU(fmt, ...) \
37 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
38 #else
39 #define DPRINTF_MMU(fmt, ...) do {} while (0)
40 #endif
41
42 #ifdef DEBUG_MXCC
43 #define DPRINTF_MXCC(fmt, ...) \
44 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
45 #else
46 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
47 #endif
48
49 #ifdef DEBUG_ASI
50 #define DPRINTF_ASI(fmt, ...) \
51 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
52 #endif
53
54 #ifdef DEBUG_CACHE_CONTROL
55 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
56 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
57 #else
58 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
59 #endif
60
61 #ifdef TARGET_SPARC64
62 #ifndef TARGET_ABI32
63 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
64 #else
65 #define AM_CHECK(env1) (1)
66 #endif
67 #endif
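/*
 * AM_CHECK tests PSTATE.AM (address mask).  When it is set, and always for
 * the 32-bit ABI, virtual addresses are truncated to 32 bits by
 * address_mask() further below before being used by the ASI helpers.
 */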
68
69 #define QT0 (env->qt0)
70 #define QT1 (env->qt1)
71
72 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
73 /* Calculates TSB pointer value for fault page size 8k or 64k */
74 static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
75 uint64_t tag_access_register,
76 int page_size)
77 {
78 uint64_t tsb_base = tsb_register & ~0x1fffULL;
79 int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
80 int tsb_size = tsb_register & 0xf;
81
82 /* discard lower 13 bits which hold tag access context */
83 uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
84
85 /* now reorder bits */
86 uint64_t tsb_base_mask = ~0x1fffULL;
87 uint64_t va = tag_access_va;
88
89 /* move va bits to correct position */
90 if (page_size == 8*1024) {
91 va >>= 9;
92 } else if (page_size == 64*1024) {
93 va >>= 12;
94 }
95
96 if (tsb_size) {
97 tsb_base_mask <<= tsb_size;
98 }
99
100 /* calculate tsb_base mask and adjust va if split is in use */
101 if (tsb_split) {
102 if (page_size == 8*1024) {
103 va &= ~(1ULL << (13 + tsb_size));
104 } else if (page_size == 64*1024) {
105 va |= (1ULL << (13 + tsb_size));
106 }
107 tsb_base_mask <<= 1;
108 }
109
110 return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
111 }
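/*
 * Illustrative example (values chosen for clarity, not taken from a real
 * setup): with tsb_register = 0x1000002 (base 0x1000000, size field 2,
 * split bit clear) and an 8k-page tag access VA of 0x4000, the VA is
 * shifted right by 9 to 0x20, so the function returns 0x1000020, i.e. the
 * 16-byte TTE slot for 8k page index 2.
 */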
112
113 /* Calculates tag target register value by reordering bits
114 in tag access register */
115 static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
116 {
117 return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
118 }
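/*
 * The value computed above carries the 13-bit context (from the low bits of
 * the tag access register) in bits 60:48 and VA[63:22] in bits 41:0, with
 * the remaining bits zero, mirroring the hardware Tag Target register
 * layout seen by TSB miss handlers.
 */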
119
120 static void replace_tlb_entry(SparcTLBEntry *tlb,
121 uint64_t tlb_tag, uint64_t tlb_tte,
122 CPUSPARCState *env1)
123 {
124 target_ulong mask, size, va, offset;
125
126 /* flush page range if translation is valid */
127 if (TTE_IS_VALID(tlb->tte)) {
128 CPUState *cs = CPU(sparc_env_get_cpu(env1));
129
130 size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
131 mask = 1ULL + ~size;
132
133 va = tlb->tag & mask;
134
135 for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
136 tlb_flush_page(cs, va + offset);
137 }
138 }
139
140 tlb->tag = tlb_tag;
141 tlb->tte = tlb_tte;
142 }
143
144 static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
145 const char *strmmu, CPUSPARCState *env1)
146 {
147 unsigned int i;
148 target_ulong mask;
149 uint64_t context;
150
151 int is_demap_context = (demap_addr >> 6) & 1;
152
153 /* demap context */
154 switch ((demap_addr >> 4) & 3) {
155 case 0: /* primary */
156 context = env1->dmmu.mmu_primary_context;
157 break;
158 case 1: /* secondary */
159 context = env1->dmmu.mmu_secondary_context;
160 break;
161 case 2: /* nucleus */
162 context = 0;
163 break;
164 case 3: /* reserved */
165 default:
166 return;
167 }
168
169 for (i = 0; i < 64; i++) {
170 if (TTE_IS_VALID(tlb[i].tte)) {
171
172 if (is_demap_context) {
173 /* will remove non-global entries matching context value */
174 if (TTE_IS_GLOBAL(tlb[i].tte) ||
175 !tlb_compare_context(&tlb[i], context)) {
176 continue;
177 }
178 } else {
179 /* demap page
180 will remove any entry matching VA */
181 mask = 0xffffffffffffe000ULL;
182 mask <<= 3 * ((tlb[i].tte >> 61) & 3);
183
184 if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
185 continue;
186 }
187
188 /* entry should be global or matching context value */
189 if (!TTE_IS_GLOBAL(tlb[i].tte) &&
190 !tlb_compare_context(&tlb[i], context)) {
191 continue;
192 }
193 }
194
195 replace_tlb_entry(&tlb[i], 0, 0, env1);
196 #ifdef DEBUG_MMU
197 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
198 dump_mmu(stdout, fprintf, env1);
199 #endif
200 }
201 }
202 }
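/*
 * Summary of the demap operation above: bits 5:4 of the demap address
 * select the context (primary, secondary or nucleus) and bit 6 selects
 * "demap context" (drop every non-global entry of that context) versus
 * "demap page" (drop entries whose tag matches the given VA and which are
 * either global or belong to that context).
 */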
203
204 static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
205 uint64_t tlb_tag, uint64_t tlb_tte,
206 const char *strmmu, CPUSPARCState *env1)
207 {
208 unsigned int i, replace_used;
209
210 /* Try replacing invalid entry */
211 for (i = 0; i < 64; i++) {
212 if (!TTE_IS_VALID(tlb[i].tte)) {
213 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
214 #ifdef DEBUG_MMU
215 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
216 dump_mmu(stdout, fprintf, env1);
217 #endif
218 return;
219 }
220 }
221
222 /* All entries are valid, try replacing unlocked entry */
223
224 for (replace_used = 0; replace_used < 2; ++replace_used) {
225
226 /* Used entries are not replaced on first pass */
227
228 for (i = 0; i < 64; i++) {
229 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
230
231 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
232 #ifdef DEBUG_MMU
233 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
234 strmmu, (replace_used ? "used" : "unused"), i);
235 dump_mmu(stdout, fprintf, env1);
236 #endif
237 return;
238 }
239 }
240
241 /* Now reset used bit and search for unused entries again */
242
243 for (i = 0; i < 64; i++) {
244 TTE_SET_UNUSED(tlb[i].tte);
245 }
246 }
247
248 #ifdef DEBUG_MMU
249 DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
250 #endif
251 /* error state? */
252 }
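/*
 * Replacement policy used above (a 1-bit pseudo-LRU): reuse any invalid
 * entry first; otherwise replace an unlocked entry whose used bit is clear;
 * if none exists, clear all used bits and retry, so only a TLB full of
 * locked entries makes the replacement fail.
 */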
253
254 #endif
255
256 #ifdef TARGET_SPARC64
257 /* Returns true if an access using this ASI has its address translated by
258 the MMU; otherwise the access goes to the raw physical address */
259 /* TODO: check sparc32 bits */
260 static inline int is_translating_asi(int asi)
261 {
262 /* UltraSPARC IIi translating ASIs
263 - note this list is defined by the CPU implementation
264 */
265 switch (asi) {
266 case 0x04 ... 0x11:
267 case 0x16 ... 0x19:
268 case 0x1E ... 0x1F:
269 case 0x24 ... 0x2C:
270 case 0x70 ... 0x73:
271 case 0x78 ... 0x79:
272 case 0x80 ... 0xFF:
273 return 1;
274
275 default:
276 return 0;
277 }
278 }
279
280 static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
281 {
282 if (AM_CHECK(env1)) {
283 addr &= 0xffffffffULL;
284 }
285 return addr;
286 }
287
288 static inline target_ulong asi_address_mask(CPUSPARCState *env,
289 int asi, target_ulong addr)
290 {
291 if (is_translating_asi(asi)) {
292 addr = address_mask(env, addr);
293 }
294 return addr;
295 }
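/*
 * Taken together: asi_address_mask() truncates the address to 32 bits only
 * when the ASI is translated by the MMU and PSTATE.AM is set; bypass and
 * diagnostic ASIs always see the full 64-bit address.
 */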
296 #endif
297
298 static void do_check_align(CPUSPARCState *env, target_ulong addr,
299 uint32_t align, uintptr_t ra)
300 {
301 if (addr & align) {
302 #ifdef DEBUG_UNALIGNED
303 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
304 "\n", addr, env->pc);
305 #endif
306 cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
307 }
308 }
309
310 void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
311 {
312 do_check_align(env, addr, align, GETPC());
313 }
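/*
 * The "align" argument is a mask of low address bits that must be clear,
 * e.g. a 4-byte access passes align = 3, so any address with either of the
 * low two bits set raises TT_UNALIGNED.
 */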
314
315 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
316 defined(DEBUG_MXCC)
317 static void dump_mxcc(CPUSPARCState *env)
318 {
319 printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
320 "\n",
321 env->mxccdata[0], env->mxccdata[1],
322 env->mxccdata[2], env->mxccdata[3]);
323 printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
324 "\n"
325 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
326 "\n",
327 env->mxccregs[0], env->mxccregs[1],
328 env->mxccregs[2], env->mxccregs[3],
329 env->mxccregs[4], env->mxccregs[5],
330 env->mxccregs[6], env->mxccregs[7]);
331 }
332 #endif
333
334 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
335 && defined(DEBUG_ASI)
336 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
337 uint64_t r1)
338 {
339 switch (size) {
340 case 1:
341 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
342 addr, asi, r1 & 0xff);
343 break;
344 case 2:
345 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
346 addr, asi, r1 & 0xffff);
347 break;
348 case 4:
349 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
350 addr, asi, r1 & 0xffffffff);
351 break;
352 case 8:
353 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
354 addr, asi, r1);
355 break;
356 }
357 }
358 #endif
359
360 #ifndef TARGET_SPARC64
361 #ifndef CONFIG_USER_ONLY
362
363
364 /* Leon3 cache control */
365
366 static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
367 uint64_t val, int size)
368 {
369 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
370 addr, val, size);
371
372 if (size != 4) {
373 DPRINTF_CACHE_CONTROL("32bits only\n");
374 return;
375 }
376
377 switch (addr) {
378 case 0x00: /* Cache control */
379
380 /* These values must always be read as zeros */
381 val &= ~CACHE_CTRL_FD;
382 val &= ~CACHE_CTRL_FI;
383 val &= ~CACHE_CTRL_IB;
384 val &= ~CACHE_CTRL_IP;
385 val &= ~CACHE_CTRL_DP;
386
387 env->cache_control = val;
388 break;
389 case 0x04: /* Instruction cache configuration */
390 case 0x08: /* Data cache configuration */
391 /* Read Only */
392 break;
393 default:
394 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
395 break;
396 }
397 }
398
399 static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
400 int size)
401 {
402 uint64_t ret = 0;
403
404 if (size != 4) {
405 DPRINTF_CACHE_CONTROL("32bits only\n");
406 return 0;
407 }
408
409 switch (addr) {
410 case 0x00: /* Cache control */
411 ret = env->cache_control;
412 break;
413
414 /* Configuration registers are read-only and always return these
415 predefined values */
416
417 case 0x04: /* Instruction cache configuration */
418 ret = 0x10220000;
419 break;
420 case 0x08: /* Data cache configuration */
421 ret = 0x18220000;
422 break;
423 default:
424 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
425 break;
426 }
427 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
428 addr, ret, size);
429 return ret;
430 }
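/*
 * Note that only the cache control register at offset 0 is writable state;
 * the instruction and data cache configuration registers always read back
 * the fixed values above, and writes to them are ignored in
 * leon3_cache_control_st().
 */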
431
432 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
433 int asi, uint32_t memop)
434 {
435 int size = 1 << (memop & MO_SIZE);
436 int sign = memop & MO_SIGN;
437 CPUState *cs = CPU(sparc_env_get_cpu(env));
438 uint64_t ret = 0;
439 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
440 uint32_t last_addr = addr;
441 #endif
442
443 do_check_align(env, addr, size - 1, GETPC());
444 switch (asi) {
445 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
446 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
447 switch (addr) {
448 case 0x00: /* Leon3 Cache Control */
449 case 0x08: /* Leon3 Instruction Cache config */
450 case 0x0C: /* Leon3 Data Cache config */
451 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
452 ret = leon3_cache_control_ld(env, addr, size);
453 }
454 break;
455 case 0x01c00a00: /* MXCC control register */
456 if (size == 8) {
457 ret = env->mxccregs[3];
458 } else {
459 qemu_log_mask(LOG_UNIMP,
460 "%08x: unimplemented access size: %d\n", addr,
461 size);
462 }
463 break;
464 case 0x01c00a04: /* MXCC control register */
465 if (size == 4) {
466 ret = env->mxccregs[3];
467 } else {
468 qemu_log_mask(LOG_UNIMP,
469 "%08x: unimplemented access size: %d\n", addr,
470 size);
471 }
472 break;
473 case 0x01c00c00: /* Module reset register */
474 if (size == 8) {
475 ret = env->mxccregs[5];
476 /* should we do something here? */
477 } else {
478 qemu_log_mask(LOG_UNIMP,
479 "%08x: unimplemented access size: %d\n", addr,
480 size);
481 }
482 break;
483 case 0x01c00f00: /* MBus port address register */
484 if (size == 8) {
485 ret = env->mxccregs[7];
486 } else {
487 qemu_log_mask(LOG_UNIMP,
488 "%08x: unimplemented access size: %d\n", addr,
489 size);
490 }
491 break;
492 default:
493 qemu_log_mask(LOG_UNIMP,
494 "%08x: unimplemented address, size: %d\n", addr,
495 size);
496 break;
497 }
498 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
499 "addr = %08x -> ret = %" PRIx64 ","
500 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
501 #ifdef DEBUG_MXCC
502 dump_mxcc(env);
503 #endif
504 break;
505 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
506 case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
507 {
508 int mmulev;
509
510 mmulev = (addr >> 8) & 15;
511 if (mmulev > 4) {
512 ret = 0;
513 } else {
514 ret = mmu_probe(env, addr, mmulev);
515 }
516 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
517 addr, mmulev, ret);
518 }
519 break;
520 case ASI_M_MMUREGS: /* SuperSparc MMU regs */
521 case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
522 {
523 int reg = (addr >> 8) & 0x1f;
524
525 ret = env->mmuregs[reg];
526 if (reg == 3) { /* Fault status cleared on read */
527 env->mmuregs[3] = 0;
528 } else if (reg == 0x13) { /* Fault status read */
529 ret = env->mmuregs[3];
530 } else if (reg == 0x14) { /* Fault address read */
531 ret = env->mmuregs[4];
532 }
533 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
534 }
535 break;
536 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
537 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
538 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
539 break;
540 case ASI_KERNELTXT: /* Supervisor code access */
541 switch (size) {
542 case 1:
543 ret = cpu_ldub_code(env, addr);
544 break;
545 case 2:
546 ret = cpu_lduw_code(env, addr);
547 break;
548 default:
549 case 4:
550 ret = cpu_ldl_code(env, addr);
551 break;
552 case 8:
553 ret = cpu_ldq_code(env, addr);
554 break;
555 }
556 break;
557 case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
558 case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
559 case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
560 case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
561 break;
562 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
563 switch (size) {
564 case 1:
565 ret = ldub_phys(cs->as, (hwaddr)addr
566 | ((hwaddr)(asi & 0xf) << 32));
567 break;
568 case 2:
569 ret = lduw_phys(cs->as, (hwaddr)addr
570 | ((hwaddr)(asi & 0xf) << 32));
571 break;
572 default:
573 case 4:
574 ret = ldl_phys(cs->as, (hwaddr)addr
575 | ((hwaddr)(asi & 0xf) << 32));
576 break;
577 case 8:
578 ret = ldq_phys(cs->as, (hwaddr)addr
579 | ((hwaddr)(asi & 0xf) << 32));
580 break;
581 }
582 break;
583 case 0x30: /* Turbosparc secondary cache diagnostic */
584 case 0x31: /* Turbosparc RAM snoop */
585 case 0x32: /* Turbosparc page table descriptor diagnostic */
586 case 0x39: /* data cache diagnostic register */
587 ret = 0;
588 break;
589 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
590 {
591 int reg = (addr >> 8) & 3;
592
593 switch (reg) {
594 case 0: /* Breakpoint Value (Addr) */
595 ret = env->mmubpregs[reg];
596 break;
597 case 1: /* Breakpoint Mask */
598 ret = env->mmubpregs[reg];
599 break;
600 case 2: /* Breakpoint Control */
601 ret = env->mmubpregs[reg];
602 break;
603 case 3: /* Breakpoint Status */
604 ret = env->mmubpregs[reg];
605 env->mmubpregs[reg] = 0ULL;
606 break;
607 }
608 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
609 ret);
610 }
611 break;
612 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
613 ret = env->mmubpctrv;
614 break;
615 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
616 ret = env->mmubpctrc;
617 break;
618 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
619 ret = env->mmubpctrs;
620 break;
621 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
622 ret = env->mmubpaction;
623 break;
624 case ASI_USERTXT: /* User code access, XXX */
625 default:
626 cpu_unassigned_access(cs, addr, false, false, asi, size);
627 ret = 0;
628 break;
629
630 case ASI_USERDATA: /* User data access */
631 case ASI_KERNELDATA: /* Supervisor data access */
632 case ASI_P: /* Implicit primary context data access (v9 only?) */
633 case ASI_M_BYPASS: /* MMU passthrough */
634 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
635 /* These are always handled inline. */
636 g_assert_not_reached();
637 }
638 if (sign) {
639 switch (size) {
640 case 1:
641 ret = (int8_t) ret;
642 break;
643 case 2:
644 ret = (int16_t) ret;
645 break;
646 case 4:
647 ret = (int32_t) ret;
648 break;
649 default:
650 break;
651 }
652 }
653 #ifdef DEBUG_ASI
654 dump_asi("read ", last_addr, asi, size, ret);
655 #endif
656 return ret;
657 }
658
659 void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
660 int asi, uint32_t memop)
661 {
662 int size = 1 << (memop & MO_SIZE);
663 SPARCCPU *cpu = sparc_env_get_cpu(env);
664 CPUState *cs = CPU(cpu);
665
666 do_check_align(env, addr, size - 1, GETPC());
667 switch (asi) {
668 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
669 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
670 switch (addr) {
671 case 0x00: /* Leon3 Cache Control */
672 case 0x08: /* Leon3 Instruction Cache config */
673 case 0x0C: /* Leon3 Data Cache config */
674 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
675 leon3_cache_control_st(env, addr, val, size);
676 }
677 break;
678
679 case 0x01c00000: /* MXCC stream data register 0 */
680 if (size == 8) {
681 env->mxccdata[0] = val;
682 } else {
683 qemu_log_mask(LOG_UNIMP,
684 "%08x: unimplemented access size: %d\n", addr,
685 size);
686 }
687 break;
688 case 0x01c00008: /* MXCC stream data register 1 */
689 if (size == 8) {
690 env->mxccdata[1] = val;
691 } else {
692 qemu_log_mask(LOG_UNIMP,
693 "%08x: unimplemented access size: %d\n", addr,
694 size);
695 }
696 break;
697 case 0x01c00010: /* MXCC stream data register 2 */
698 if (size == 8) {
699 env->mxccdata[2] = val;
700 } else {
701 qemu_log_mask(LOG_UNIMP,
702 "%08x: unimplemented access size: %d\n", addr,
703 size);
704 }
705 break;
706 case 0x01c00018: /* MXCC stream data register 3 */
707 if (size == 8) {
708 env->mxccdata[3] = val;
709 } else {
710 qemu_log_mask(LOG_UNIMP,
711 "%08x: unimplemented access size: %d\n", addr,
712 size);
713 }
714 break;
715 case 0x01c00100: /* MXCC stream source */
716 if (size == 8) {
717 env->mxccregs[0] = val;
718 } else {
719 qemu_log_mask(LOG_UNIMP,
720 "%08x: unimplemented access size: %d\n", addr,
721 size);
722 }
723 env->mxccdata[0] = ldq_phys(cs->as,
724 (env->mxccregs[0] & 0xffffffffULL) +
725 0);
726 env->mxccdata[1] = ldq_phys(cs->as,
727 (env->mxccregs[0] & 0xffffffffULL) +
728 8);
729 env->mxccdata[2] = ldq_phys(cs->as,
730 (env->mxccregs[0] & 0xffffffffULL) +
731 16);
732 env->mxccdata[3] = ldq_phys(cs->as,
733 (env->mxccregs[0] & 0xffffffffULL) +
734 24);
735 break;
736 case 0x01c00200: /* MXCC stream destination */
737 if (size == 8) {
738 env->mxccregs[1] = val;
739 } else {
740 qemu_log_mask(LOG_UNIMP,
741 "%08x: unimplemented access size: %d\n", addr,
742 size);
743 }
744 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
745 env->mxccdata[0]);
746 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
747 env->mxccdata[1]);
748 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
749 env->mxccdata[2]);
750 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
751 env->mxccdata[3]);
752 break;
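/* Together the two cases above implement the MXCC stream engine: writing
   the stream source register fetches 32 bytes (four doublewords) into
   mxccdata[], and writing the stream destination register stores those
   32 bytes back out to the given physical address. */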
753 case 0x01c00a00: /* MXCC control register */
754 if (size == 8) {
755 env->mxccregs[3] = val;
756 } else {
757 qemu_log_mask(LOG_UNIMP,
758 "%08x: unimplemented access size: %d\n", addr,
759 size);
760 }
761 break;
762 case 0x01c00a04: /* MXCC control register */
763 if (size == 4) {
764 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
765 | val;
766 } else {
767 qemu_log_mask(LOG_UNIMP,
768 "%08x: unimplemented access size: %d\n", addr,
769 size);
770 }
771 break;
772 case 0x01c00e00: /* MXCC error register */
773 /* writing a 1 bit clears the error */
774 if (size == 8) {
775 env->mxccregs[6] &= ~val;
776 } else {
777 qemu_log_mask(LOG_UNIMP,
778 "%08x: unimplemented access size: %d\n", addr,
779 size);
780 }
781 break;
782 case 0x01c00f00: /* MBus port address register */
783 if (size == 8) {
784 env->mxccregs[7] = val;
785 } else {
786 qemu_log_mask(LOG_UNIMP,
787 "%08x: unimplemented access size: %d\n", addr,
788 size);
789 }
790 break;
791 default:
792 qemu_log_mask(LOG_UNIMP,
793 "%08x: unimplemented address, size: %d\n", addr,
794 size);
795 break;
796 }
797 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
798 asi, size, addr, val);
799 #ifdef DEBUG_MXCC
800 dump_mxcc(env);
801 #endif
802 break;
803 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
804 case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
805 {
806 int mmulev;
807
808 mmulev = (addr >> 8) & 15;
809 DPRINTF_MMU("mmu flush level %d\n", mmulev);
810 switch (mmulev) {
811 case 0: /* flush page */
812 tlb_flush_page(CPU(cpu), addr & 0xfffff000);
813 break;
814 case 1: /* flush segment (256k) */
815 case 2: /* flush region (16M) */
816 case 3: /* flush context (4G) */
817 case 4: /* flush entire */
818 tlb_flush(CPU(cpu));
819 break;
820 default:
821 break;
822 }
823 #ifdef DEBUG_MMU
824 dump_mmu(stdout, fprintf, env);
825 #endif
826 }
827 break;
828 case ASI_M_MMUREGS: /* write MMU regs */
829 case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
830 {
831 int reg = (addr >> 8) & 0x1f;
832 uint32_t oldreg;
833
834 oldreg = env->mmuregs[reg];
835 switch (reg) {
836 case 0: /* Control Register */
837 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
838 (val & 0x00ffffff);
839 /* Mappings generated during no-fault mode
840 are invalid in normal mode. */
841 if ((oldreg ^ env->mmuregs[reg])
842 & (MMU_NF | env->def->mmu_bm)) {
843 tlb_flush(CPU(cpu));
844 }
845 break;
846 case 1: /* Context Table Pointer Register */
847 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
848 break;
849 case 2: /* Context Register */
850 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
851 if (oldreg != env->mmuregs[reg]) {
852 /* we flush when the MMU context changes because
853 QEMU has no MMU context support */
854 tlb_flush(CPU(cpu));
855 }
856 break;
857 case 3: /* Synchronous Fault Status Register with Clear */
858 case 4: /* Synchronous Fault Address Register */
859 break;
860 case 0x10: /* TLB Replacement Control Register */
861 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
862 break;
863 case 0x13: /* Synchronous Fault Status Register with Read
864 and Clear */
865 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
866 break;
867 case 0x14: /* Synchronous Fault Address Register */
868 env->mmuregs[4] = val;
869 break;
870 default:
871 env->mmuregs[reg] = val;
872 break;
873 }
874 if (oldreg != env->mmuregs[reg]) {
875 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
876 reg, oldreg, env->mmuregs[reg]);
877 }
878 #ifdef DEBUG_MMU
879 dump_mmu(stdout, fprintf, env);
880 #endif
881 }
882 break;
883 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
884 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
885 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
886 break;
887 case ASI_M_TXTC_TAG: /* I-cache tag */
888 case ASI_M_TXTC_DATA: /* I-cache data */
889 case ASI_M_DATAC_TAG: /* D-cache tag */
890 case ASI_M_DATAC_DATA: /* D-cache data */
891 case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
892 case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
893 case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
894 case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
895 case ASI_M_FLUSH_USER: /* I/D-cache flush user */
896 break;
897 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
898 {
899 switch (size) {
900 case 1:
901 stb_phys(cs->as, (hwaddr)addr
902 | ((hwaddr)(asi & 0xf) << 32), val);
903 break;
904 case 2:
905 stw_phys(cs->as, (hwaddr)addr
906 | ((hwaddr)(asi & 0xf) << 32), val);
907 break;
908 case 4:
909 default:
910 stl_phys(cs->as, (hwaddr)addr
911 | ((hwaddr)(asi & 0xf) << 32), val);
912 break;
913 case 8:
914 stq_phys(cs->as, (hwaddr)addr
915 | ((hwaddr)(asi & 0xf) << 32), val);
916 break;
917 }
918 }
919 break;
920 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
921 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
922 Turbosparc snoop RAM */
923 case 0x32: /* store buffer control or Turbosparc page table
924 descriptor diagnostic */
925 case 0x36: /* I-cache flash clear */
926 case 0x37: /* D-cache flash clear */
927 break;
928 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
929 {
930 int reg = (addr >> 8) & 3;
931
932 switch (reg) {
933 case 0: /* Breakpoint Value (Addr) */
934 env->mmubpregs[reg] = (val & 0xfffffffffULL);
935 break;
936 case 1: /* Breakpoint Mask */
937 env->mmubpregs[reg] = (val & 0xfffffffffULL);
938 break;
939 case 2: /* Breakpoint Control */
940 env->mmubpregs[reg] = (val & 0x7fULL);
941 break;
942 case 3: /* Breakpoint Status */
943 env->mmubpregs[reg] = (val & 0xfULL);
944 break;
945 }
946 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
947 env->mmuregs[reg]);
948 }
949 break;
950 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
951 env->mmubpctrv = val & 0xffffffff;
952 break;
953 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
954 env->mmubpctrc = val & 0x3;
955 break;
956 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
957 env->mmubpctrs = val & 0x3;
958 break;
959 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
960 env->mmubpaction = val & 0x1fff;
961 break;
962 case ASI_USERTXT: /* User code access, XXX */
963 case ASI_KERNELTXT: /* Supervisor code access, XXX */
964 default:
965 cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
966 addr, true, false, asi, size);
967 break;
968
969 case ASI_USERDATA: /* User data access */
970 case ASI_KERNELDATA: /* Supervisor data access */
971 case ASI_P:
972 case ASI_M_BYPASS: /* MMU passthrough */
973 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
974 case ASI_M_BCOPY: /* Block copy, sta access */
975 case ASI_M_BFILL: /* Block fill, stda access */
976 /* These are always handled inline. */
977 g_assert_not_reached();
978 }
979 #ifdef DEBUG_ASI
980 dump_asi("write", addr, asi, size, val);
981 #endif
982 }
983
984 #endif /* CONFIG_USER_ONLY */
985 #else /* TARGET_SPARC64 */
986
987 #ifdef CONFIG_USER_ONLY
988 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
989 int asi, uint32_t memop)
990 {
991 int size = 1 << (memop & MO_SIZE);
992 int sign = memop & MO_SIGN;
993 uint64_t ret = 0;
994
995 if (asi < 0x80) {
996 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
997 }
998 do_check_align(env, addr, size - 1, GETPC());
999 addr = asi_address_mask(env, asi, addr);
1000
1001 switch (asi) {
1002 case ASI_PNF: /* Primary no-fault */
1003 case ASI_PNFL: /* Primary no-fault LE */
1004 case ASI_SNF: /* Secondary no-fault */
1005 case ASI_SNFL: /* Secondary no-fault LE */
1006 if (page_check_range(addr, size, PAGE_READ) == -1) {
1007 ret = 0;
1008 break;
1009 }
1010 switch (size) {
1011 case 1:
1012 ret = cpu_ldub_data(env, addr);
1013 break;
1014 case 2:
1015 ret = cpu_lduw_data(env, addr);
1016 break;
1017 case 4:
1018 ret = cpu_ldl_data(env, addr);
1019 break;
1020 case 8:
1021 ret = cpu_ldq_data(env, addr);
1022 break;
1023 default:
1024 g_assert_not_reached();
1025 }
1026 break;
1028
1029 case ASI_P: /* Primary */
1030 case ASI_PL: /* Primary LE */
1031 case ASI_S: /* Secondary */
1032 case ASI_SL: /* Secondary LE */
1033 /* These are always handled inline. */
1034 g_assert_not_reached();
1035
1036 default:
1037 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1038 }
1039
1040 /* Convert from little endian */
1041 switch (asi) {
1042 case ASI_PNFL: /* Primary no-fault LE */
1043 case ASI_SNFL: /* Secondary no-fault LE */
1044 switch (size) {
1045 case 2:
1046 ret = bswap16(ret);
1047 break;
1048 case 4:
1049 ret = bswap32(ret);
1050 break;
1051 case 8:
1052 ret = bswap64(ret);
1053 break;
1054 }
1055 }
1056
1057 /* Convert to signed number */
1058 if (sign) {
1059 switch (size) {
1060 case 1:
1061 ret = (int8_t) ret;
1062 break;
1063 case 2:
1064 ret = (int16_t) ret;
1065 break;
1066 case 4:
1067 ret = (int32_t) ret;
1068 break;
1069 }
1070 }
1071 #ifdef DEBUG_ASI
1072 dump_asi("read", addr, asi, size, ret);
1073 #endif
1074 return ret;
1075 }
1076
1077 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1078 int asi, uint32_t memop)
1079 {
1080 int size = 1 << (memop & MO_SIZE);
1081 #ifdef DEBUG_ASI
1082 dump_asi("write", addr, asi, size, val);
1083 #endif
1084 if (asi < 0x80) {
1085 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1086 }
1087 do_check_align(env, addr, size - 1, GETPC());
1088
1089 switch (asi) {
1090 case ASI_P: /* Primary */
1091 case ASI_PL: /* Primary LE */
1092 case ASI_S: /* Secondary */
1093 case ASI_SL: /* Secondary LE */
1094 /* These are always handled inline. */
1095 g_assert_not_reached();
1096
1097 case ASI_PNF: /* Primary no-fault, RO */
1098 case ASI_SNF: /* Secondary no-fault, RO */
1099 case ASI_PNFL: /* Primary no-fault LE, RO */
1100 case ASI_SNFL: /* Secondary no-fault LE, RO */
1101 default:
1102 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1103 }
1104 }
1105
1106 #else /* CONFIG_USER_ONLY */
1107
1108 uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1109 int asi, uint32_t memop)
1110 {
1111 int size = 1 << (memop & MO_SIZE);
1112 int sign = memop & MO_SIGN;
1113 CPUState *cs = CPU(sparc_env_get_cpu(env));
1114 uint64_t ret = 0;
1115 #if defined(DEBUG_ASI)
1116 target_ulong last_addr = addr;
1117 #endif
1118
1119 asi &= 0xff;
1120
1121 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1122 || (cpu_has_hypervisor(env)
1123 && asi >= 0x30 && asi < 0x80
1124 && !(env->hpstate & HS_PRIV))) {
1125 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1126 }
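/* The check above treats ASIs below 0x80 as privileged and, when a
   hypervisor is present, additionally restricts the 0x30-0x7f range to
   hyperprivileged mode. */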
1127
1128 do_check_align(env, addr, size - 1, GETPC());
1129 addr = asi_address_mask(env, asi, addr);
1130
1131 switch (asi) {
1132 case ASI_PNF:
1133 case ASI_PNFL:
1134 case ASI_SNF:
1135 case ASI_SNFL:
1136 {
1137 TCGMemOpIdx oi;
1138 int idx = (env->pstate & PS_PRIV
1139 ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
1140 : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
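/* Bit 0 of these no-fault ASI numbers selects the secondary context (the
   "asi & 1" test above) and bit 3 selects little-endian byte order (the
   "asi & 8" tests below); the translation is probed first so that an
   unmapped page reports the exception index chosen by the MMU code. */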
1141
1142 if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
1143 #ifdef DEBUG_ASI
1144 dump_asi("read ", last_addr, asi, size, ret);
1145 #endif
1146 /* exception_index is set in get_physical_address_data. */
1147 cpu_raise_exception_ra(env, cs->exception_index, GETPC());
1148 }
1149 oi = make_memop_idx(memop, idx);
1150 switch (size) {
1151 case 1:
1152 ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
1153 break;
1154 case 2:
1155 if (asi & 8) {
1156 ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
1157 } else {
1158 ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
1159 }
1160 break;
1161 case 4:
1162 if (asi & 8) {
1163 ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
1164 } else {
1165 ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
1166 }
1167 break;
1168 case 8:
1169 if (asi & 8) {
1170 ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
1171 } else {
1172 ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
1173 }
1174 break;
1175 default:
1176 g_assert_not_reached();
1177 }
1178 }
1179 break;
1180
1181 case ASI_AIUP: /* As if user primary */
1182 case ASI_AIUS: /* As if user secondary */
1183 case ASI_AIUPL: /* As if user primary LE */
1184 case ASI_AIUSL: /* As if user secondary LE */
1185 case ASI_P: /* Primary */
1186 case ASI_S: /* Secondary */
1187 case ASI_PL: /* Primary LE */
1188 case ASI_SL: /* Secondary LE */
1189 case ASI_REAL: /* Bypass */
1190 case ASI_REAL_IO: /* Bypass, non-cacheable */
1191 case ASI_REAL_L: /* Bypass LE */
1192 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1193 case ASI_N: /* Nucleus */
1194 case ASI_NL: /* Nucleus Little Endian (LE) */
1195 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1196 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1197 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1198 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1199 case ASI_TWINX_REAL: /* Real address, twinx */
1200 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1201 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1202 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1203 case ASI_TWINX_N: /* Nucleus, twinx */
1204 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1205 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1206 case ASI_TWINX_P: /* Primary, twinx */
1207 case ASI_TWINX_PL: /* Primary, twinx, LE */
1208 case ASI_TWINX_S: /* Secondary, twinx */
1209 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1210 /* These are always handled inline. */
1211 g_assert_not_reached();
1212
1213 case ASI_UPA_CONFIG: /* UPA config */
1214 /* XXX */
1215 break;
1216 case ASI_LSU_CONTROL: /* LSU */
1217 ret = env->lsu;
1218 break;
1219 case ASI_IMMU: /* I-MMU regs */
1220 {
1221 int reg = (addr >> 3) & 0xf;
1222 switch (reg) {
1223 case 0:
1224 /* 0x00 I-TSB Tag Target register */
1225 ret = ultrasparc_tag_target(env->immu.tag_access);
1226 break;
1227 case 3: /* SFSR */
1228 ret = env->immu.sfsr;
1229 break;
1230 case 5: /* TSB access */
1231 ret = env->immu.tsb;
1232 break;
1233 case 6:
1234 /* 0x30 I-TSB Tag Access register */
1235 ret = env->immu.tag_access;
1236 break;
1237 default:
1238 cpu_unassigned_access(cs, addr, false, false, 1, size);
1239 ret = 0;
1240 }
1241 break;
1242 }
1243 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
1244 {
1245 /* env->immu.tsb holds the I-MMU TSB register value,
1246 env->immu.tag_access holds the I-MMU Tag Access register value */
1247 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1248 8*1024);
1249 break;
1250 }
1251 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
1252 {
1253 /* env->immu.tsb holds the I-MMU TSB register value,
1254 env->immu.tag_access holds the I-MMU Tag Access register value */
1255 ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
1256 64*1024);
1257 break;
1258 }
1259 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1260 {
1261 int reg = (addr >> 3) & 0x3f;
1262
1263 ret = env->itlb[reg].tte;
1264 break;
1265 }
1266 case ASI_ITLB_TAG_READ: /* I-MMU tag read */
1267 {
1268 int reg = (addr >> 3) & 0x3f;
1269
1270 ret = env->itlb[reg].tag;
1271 break;
1272 }
1273 case ASI_DMMU: /* D-MMU regs */
1274 {
1275 int reg = (addr >> 3) & 0xf;
1276 switch (reg) {
1277 case 0:
1278 /* 0x00 D-TSB Tag Target register */
1279 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1280 break;
1281 case 1: /* 0x08 Primary Context */
1282 ret = env->dmmu.mmu_primary_context;
1283 break;
1284 case 2: /* 0x10 Secondary Context */
1285 ret = env->dmmu.mmu_secondary_context;
1286 break;
1287 case 3: /* SFSR */
1288 ret = env->dmmu.sfsr;
1289 break;
1290 case 4: /* 0x20 SFAR */
1291 ret = env->dmmu.sfar;
1292 break;
1293 case 5: /* 0x28 TSB access */
1294 ret = env->dmmu.tsb;
1295 break;
1296 case 6: /* 0x30 D-TSB Tag Access register */
1297 ret = env->dmmu.tag_access;
1298 break;
1299 case 7:
1300 ret = env->dmmu.virtual_watchpoint;
1301 break;
1302 case 8:
1303 ret = env->dmmu.physical_watchpoint;
1304 break;
1305 default:
1306 cpu_unassigned_access(cs, addr, false, false, 1, size);
1307 ret = 0;
1308 }
1309 break;
1310 }
1311 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
1312 {
1313 /* env->dmmu.tsb holds the D-MMU TSB register value,
1314 env->dmmu.tag_access holds the D-MMU Tag Access register value */
1315 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1316 8*1024);
1317 break;
1318 }
1319 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
1320 {
1321 /* env->dmmu.tsb holds the D-MMU TSB register value,
1322 env->dmmu.tag_access holds the D-MMU Tag Access register value */
1323 ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
1324 64*1024);
1325 break;
1326 }
1327 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1328 {
1329 int reg = (addr >> 3) & 0x3f;
1330
1331 ret = env->dtlb[reg].tte;
1332 break;
1333 }
1334 case ASI_DTLB_TAG_READ: /* D-MMU tag read */
1335 {
1336 int reg = (addr >> 3) & 0x3f;
1337
1338 ret = env->dtlb[reg].tag;
1339 break;
1340 }
1341 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1342 break;
1343 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1344 ret = env->ivec_status;
1345 break;
1346 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1347 {
1348 int reg = (addr >> 4) & 0x3;
1349 if (reg < 3) {
1350 ret = env->ivec_data[reg];
1351 }
1352 break;
1353 }
1354 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1355 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1356 /* Hyperprivileged access only */
1357 cpu_unassigned_access(cs, addr, false, false, 1, size);
1358 }
1359 /* fall through */
1360 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1361 {
1362 unsigned int i = (addr >> 3) & 0x7;
1363 ret = env->scratch[i];
1364 break;
1365 }
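/* The UA2005 scratchpad is modelled as eight 64-bit registers selected by
   address bits 5:3; offsets 0x20-0x2f are reserved to hyperprivileged
   software, which is why the ASI_SCRATCHPAD case above reports them as
   unassigned while ASI_HYP_SCRATCHPAD accepts the full range. */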
1366 case ASI_DCACHE_DATA: /* D-cache data */
1367 case ASI_DCACHE_TAG: /* D-cache tag access */
1368 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1369 case ASI_AFSR: /* E-cache asynchronous fault status */
1370 case ASI_AFAR: /* E-cache asynchronous fault address */
1371 case ASI_EC_TAG_DATA: /* E-cache tag data */
1372 case ASI_IC_INSTR: /* I-cache instruction access */
1373 case ASI_IC_TAG: /* I-cache tag access */
1374 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1375 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1376 case ASI_EC_W: /* E-cache tag */
1377 case ASI_EC_R: /* E-cache tag */
1378 break;
1379 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
1380 case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
1381 case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
1382 case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
1383 case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
1384 case ASI_INTR_W: /* Interrupt vector, WO */
1385 default:
1386 cpu_unassigned_access(cs, addr, false, false, 1, size);
1387 ret = 0;
1388 break;
1389 }
1390
1391 /* Convert to signed number */
1392 if (sign) {
1393 switch (size) {
1394 case 1:
1395 ret = (int8_t) ret;
1396 break;
1397 case 2:
1398 ret = (int16_t) ret;
1399 break;
1400 case 4:
1401 ret = (int32_t) ret;
1402 break;
1403 default:
1404 break;
1405 }
1406 }
1407 #ifdef DEBUG_ASI
1408 dump_asi("read ", last_addr, asi, size, ret);
1409 #endif
1410 return ret;
1411 }
1412
1413 void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1414 int asi, uint32_t memop)
1415 {
1416 int size = 1 << (memop & MO_SIZE);
1417 SPARCCPU *cpu = sparc_env_get_cpu(env);
1418 CPUState *cs = CPU(cpu);
1419
1420 #ifdef DEBUG_ASI
1421 dump_asi("write", addr, asi, size, val);
1422 #endif
1423
1424 asi &= 0xff;
1425
1426 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1427 || (cpu_has_hypervisor(env)
1428 && asi >= 0x30 && asi < 0x80
1429 && !(env->hpstate & HS_PRIV))) {
1430 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1431 }
1432
1433 do_check_align(env, addr, size - 1, GETPC());
1434 addr = asi_address_mask(env, asi, addr);
1435
1436 switch (asi) {
1437 case ASI_AIUP: /* As if user primary */
1438 case ASI_AIUS: /* As if user secondary */
1439 case ASI_AIUPL: /* As if user primary LE */
1440 case ASI_AIUSL: /* As if user secondary LE */
1441 case ASI_P: /* Primary */
1442 case ASI_S: /* Secondary */
1443 case ASI_PL: /* Primary LE */
1444 case ASI_SL: /* Secondary LE */
1445 case ASI_REAL: /* Bypass */
1446 case ASI_REAL_IO: /* Bypass, non-cacheable */
1447 case ASI_REAL_L: /* Bypass LE */
1448 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1449 case ASI_N: /* Nucleus */
1450 case ASI_NL: /* Nucleus Little Endian (LE) */
1451 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1452 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1453 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1454 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1455 case ASI_TWINX_REAL: /* Real address, twinx */
1456 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1457 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1458 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1459 case ASI_TWINX_N: /* Nucleus, twinx */
1460 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1461 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1462 case ASI_TWINX_P: /* Primary, twinx */
1463 case ASI_TWINX_PL: /* Primary, twinx, LE */
1464 case ASI_TWINX_S: /* Secondary, twinx */
1465 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1466 /* These are always handled inline. */
1467 g_assert_not_reached();
1468
1469 case ASI_UPA_CONFIG: /* UPA config */
1470 /* XXX */
1471 return;
1472 case ASI_LSU_CONTROL: /* LSU */
1473 env->lsu = val & (DMMU_E | IMMU_E);
1474 return;
1475 case ASI_IMMU: /* I-MMU regs */
1476 {
1477 int reg = (addr >> 3) & 0xf;
1478 uint64_t oldreg;
1479
1480 oldreg = env->immuregs[reg];
1481 switch (reg) {
1482 case 0: /* RO */
1483 return;
1484 case 1: /* Not in I-MMU */
1485 case 2:
1486 return;
1487 case 3: /* SFSR */
1488 if ((val & 1) == 0) {
1489 val = 0; /* Clear SFSR */
1490 }
1491 env->immu.sfsr = val;
1492 break;
1493 case 4: /* RO */
1494 return;
1495 case 5: /* TSB access */
1496 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1497 PRIx64 "\n", env->immu.tsb, val);
1498 env->immu.tsb = val;
1499 break;
1500 case 6: /* Tag access */
1501 env->immu.tag_access = val;
1502 break;
1503 case 7:
1504 case 8:
1505 return;
1506 default:
1507 cpu_unassigned_access(cs, addr, true, false, 1, size);
1508 break;
1509 }
1510
1511 if (oldreg != env->immuregs[reg]) {
1512 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1513 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1514 }
1515 #ifdef DEBUG_MMU
1516 dump_mmu(stdout, fprintf, env);
1517 #endif
1518 return;
1519 }
1520 case ASI_ITLB_DATA_IN: /* I-MMU data in */
1521 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
1522 return;
1523 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1524 {
1525 /* TODO: auto demap */
1526
1527 unsigned int i = (addr >> 3) & 0x3f;
1528
1529 replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
1530
1531 #ifdef DEBUG_MMU
1532 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1533 dump_mmu(stdout, fprintf, env);
1534 #endif
1535 return;
1536 }
1537 case ASI_IMMU_DEMAP: /* I-MMU demap */
1538 demap_tlb(env->itlb, addr, "immu", env);
1539 return;
1540 case ASI_DMMU: /* D-MMU regs */
1541 {
1542 int reg = (addr >> 3) & 0xf;
1543 uint64_t oldreg;
1544
1545 oldreg = env->dmmuregs[reg];
1546 switch (reg) {
1547 case 0: /* RO */
1548 case 4:
1549 return;
1550 case 3: /* SFSR */
1551 if ((val & 1) == 0) {
1552 val = 0; /* Clear SFSR, Fault address */
1553 env->dmmu.sfar = 0;
1554 }
1555 env->dmmu.sfsr = val;
1556 break;
1557 case 1: /* Primary context */
1558 env->dmmu.mmu_primary_context = val;
1559 /* can be optimized to only flush MMU_USER_IDX
1560 and MMU_KERNEL_IDX entries */
1561 tlb_flush(CPU(cpu));
1562 break;
1563 case 2: /* Secondary context */
1564 env->dmmu.mmu_secondary_context = val;
1565 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1566 and MMU_KERNEL_SECONDARY_IDX entries */
1567 tlb_flush(CPU(cpu));
1568 break;
1569 case 5: /* TSB access */
1570 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1571 PRIx64 "\n", env->dmmu.tsb, val);
1572 env->dmmu.tsb = val;
1573 break;
1574 case 6: /* Tag access */
1575 env->dmmu.tag_access = val;
1576 break;
1577 case 7: /* Virtual Watchpoint */
1578 env->dmmu.virtual_watchpoint = val;
1579 break;
1580 case 8: /* Physical Watchpoint */
1581 env->dmmu.physical_watchpoint = val;
1582 break;
1583 default:
1584 cpu_unassigned_access(cs, addr, true, false, 1, size);
1585 break;
1586 }
1587
1588 if (oldreg != env->dmmuregs[reg]) {
1589 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1590 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1591 }
1592 #ifdef DEBUG_MMU
1593 dump_mmu(stdout, fprintf, env);
1594 #endif
1595 return;
1596 }
1597 case ASI_DTLB_DATA_IN: /* D-MMU data in */
1598 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
1599 return;
1600 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1601 {
1602 unsigned int i = (addr >> 3) & 0x3f;
1603
1604 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
1605
1606 #ifdef DEBUG_MMU
1607 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1608 dump_mmu(stdout, fprintf, env);
1609 #endif
1610 return;
1611 }
1612 case ASI_DMMU_DEMAP: /* D-MMU demap */
1613 demap_tlb(env->dtlb, addr, "dmmu", env);
1614 return;
1615 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1616 env->ivec_status = val & 0x20;
1617 return;
1618 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1619 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1620 /* Hyperprivileged access only */
1621 cpu_unassigned_access(cs, addr, true, false, 1, size);
1622 }
1623 /* fall through */
1624 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1625 {
1626 unsigned int i = (addr >> 3) & 0x7;
1627 env->scratch[i] = val;
1628 return;
1629 }
1630 case ASI_DCACHE_DATA: /* D-cache data */
1631 case ASI_DCACHE_TAG: /* D-cache tag access */
1632 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1633 case ASI_AFSR: /* E-cache asynchronous fault status */
1634 case ASI_AFAR: /* E-cache asynchronous fault address */
1635 case ASI_EC_TAG_DATA: /* E-cache tag data */
1636 case ASI_IC_INSTR: /* I-cache instruction access */
1637 case ASI_IC_TAG: /* I-cache tag access */
1638 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1639 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1640 case ASI_EC_W: /* E-cache tag */
1641 case ASI_EC_R: /* E-cache tag */
1642 return;
1643 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
1644 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
1645 case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
1646 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
1647 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
1648 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
1649 case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
1650 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1651 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1652 case ASI_PNF: /* Primary no-fault, RO */
1653 case ASI_SNF: /* Secondary no-fault, RO */
1654 case ASI_PNFL: /* Primary no-fault LE, RO */
1655 case ASI_SNFL: /* Secondary no-fault LE, RO */
1656 default:
1657 cpu_unassigned_access(cs, addr, true, false, 1, size);
1658 return;
1659 }
1660 }
1661 #endif /* CONFIG_USER_ONLY */
1662 #endif /* TARGET_SPARC64 */
1663
1664 #if !defined(CONFIG_USER_ONLY)
1665 #ifndef TARGET_SPARC64
1666 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
1667 bool is_write, bool is_exec, int is_asi,
1668 unsigned size)
1669 {
1670 SPARCCPU *cpu = SPARC_CPU(cs);
1671 CPUSPARCState *env = &cpu->env;
1672 int fault_type;
1673
1674 #ifdef DEBUG_UNASSIGNED
1675 if (is_asi) {
1676 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
1677 " asi 0x%02x from " TARGET_FMT_lx "\n",
1678 is_exec ? "exec" : is_write ? "write" : "read", size,
1679 size == 1 ? "" : "s", addr, is_asi, env->pc);
1680 } else {
1681 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
1682 " from " TARGET_FMT_lx "\n",
1683 is_exec ? "exec" : is_write ? "write" : "read", size,
1684 size == 1 ? "" : "s", addr, env->pc);
1685 }
1686 #endif
1687 /* Don't overwrite translation and access faults */
1688 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
1689 if ((fault_type > 4) || (fault_type == 0)) {
1690 env->mmuregs[3] = 0; /* Fault status register */
1691 if (is_asi) {
1692 env->mmuregs[3] |= 1 << 16;
1693 }
1694 if (env->psrs) {
1695 env->mmuregs[3] |= 1 << 5;
1696 }
1697 if (is_exec) {
1698 env->mmuregs[3] |= 1 << 6;
1699 }
1700 if (is_write) {
1701 env->mmuregs[3] |= 1 << 7;
1702 }
1703 env->mmuregs[3] |= (5 << 2) | 2;
1704 /* SuperSPARC will never place instruction fault addresses in the FAR */
1705 if (!is_exec) {
1706 env->mmuregs[4] = addr; /* Fault address register */
1707 }
1708 }
1709 /* overflow (same type fault was not read before another fault) */
1710 if (fault_type == ((env->mmuregs[3] & 0x1c) >> 2)) {
1711 env->mmuregs[3] |= 1;
1712 }
1713
1714 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
1715 int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
1716 cpu_raise_exception_ra(env, tt, GETPC());
1717 }
1718
1719 /* flush neverland mappings created during no-fault mode,
1720 so that subsequent MMU faults report proper fault types */
1721 if (env->mmuregs[0] & MMU_NF) {
1722 tlb_flush(cs);
1723 }
1724 }
1725 #else
1726 void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
1727 bool is_write, bool is_exec, int is_asi,
1728 unsigned size)
1729 {
1730 SPARCCPU *cpu = SPARC_CPU(cs);
1731 CPUSPARCState *env = &cpu->env;
1732
1733 #ifdef DEBUG_UNASSIGNED
1734 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
1735 "\n", addr, env->pc);
1736 #endif
1737
1738 if (is_exec) { /* XXX has_hypervisor */
1739 if (env->lsu & (IMMU_E)) {
1740 cpu_raise_exception_ra(env, TT_CODE_ACCESS, GETPC());
1741 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
1742 cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, GETPC());
1743 }
1744 } else {
1745 if (env->lsu & (DMMU_E)) {
1746 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1747 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
1748 cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, GETPC());
1749 }
1750 }
1751 }
1752 #endif
1753 #endif
1754
1755 #if !defined(CONFIG_USER_ONLY)
1756 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1757 MMUAccessType access_type,
1758 int mmu_idx,
1759 uintptr_t retaddr)
1760 {
1761 SPARCCPU *cpu = SPARC_CPU(cs);
1762 CPUSPARCState *env = &cpu->env;
1763
1764 #ifdef DEBUG_UNALIGNED
1765 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
1766 "\n", addr, env->pc);
1767 #endif
1768 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
1769 }
1770
1771 /* Try to fill the TLB and raise an exception on error. If retaddr is
1772 NULL, it means that the function was called in C code (i.e. not
1773 from generated code or from helper.c) */
1774 /* XXX: fix it to restore all registers */
1775 void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
1776 int mmu_idx, uintptr_t retaddr)
1777 {
1778 int ret;
1779
1780 ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
1781 if (ret) {
1782 cpu_loop_exit_restore(cs, retaddr);
1783 }
1784 }
1785 #endif