target-alpha/helper.c
/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "softfloat.h"
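/* The FPCR helpers below convert between the architectural FPCR layout
   and the softfloat state kept in CPUState: the accrued exception bits,
   the trap disable mask, the dynamic rounding mode, and the various
   denormal/underflow-to-zero controls.  FPCR_SUM is set whenever any of
   the five exception status bits is set.  */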
uint64_t cpu_alpha_load_fpcr (CPUState *env)
{
    uint64_t r = 0;
    uint8_t t;

    t = env->fpcr_exc_status;
    if (t) {
        r = FPCR_SUM;
        if (t & float_flag_invalid) {
            r |= FPCR_INV;
        }
        if (t & float_flag_divbyzero) {
            r |= FPCR_DZE;
        }
        if (t & float_flag_overflow) {
            r |= FPCR_OVF;
        }
        if (t & float_flag_underflow) {
            r |= FPCR_UNF;
        }
        if (t & float_flag_inexact) {
            r |= FPCR_INE;
        }
    }

    t = env->fpcr_exc_mask;
    if (t & float_flag_invalid) {
        r |= FPCR_INVD;
    }
    if (t & float_flag_divbyzero) {
        r |= FPCR_DZED;
    }
    if (t & float_flag_overflow) {
        r |= FPCR_OVFD;
    }
    if (t & float_flag_underflow) {
        r |= FPCR_UNFD;
    }
    if (t & float_flag_inexact) {
        r |= FPCR_INED;
    }

    switch (env->fpcr_dyn_round) {
    case float_round_nearest_even:
        r |= FPCR_DYN_NORMAL;
        break;
    case float_round_down:
        r |= FPCR_DYN_MINUS;
        break;
    case float_round_up:
        r |= FPCR_DYN_PLUS;
        break;
    case float_round_to_zero:
        r |= FPCR_DYN_CHOPPED;
        break;
    }

    if (env->fpcr_dnz) {
        r |= FPCR_DNZ;
    }
    if (env->fpcr_dnod) {
        r |= FPCR_DNOD;
    }
    if (env->fpcr_undz) {
        r |= FPCR_UNDZ;
    }

    return r;
}

void cpu_alpha_store_fpcr (CPUState *env, uint64_t val)
{
    uint8_t t;

    t = 0;
    if (val & FPCR_INV) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZE) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVF) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNF) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INE) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_status = t;

    t = 0;
    if (val & FPCR_INVD) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZED) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVFD) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNFD) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INED) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_mask = t;

    switch (val & FPCR_DYN_MASK) {
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_NORMAL:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_flush_to_zero
        = (val & (FPCR_UNDZ|FPCR_UNFD)) == (FPCR_UNDZ|FPCR_UNFD);

    env->fpcr_dnz = (val & FPCR_DNZ) != 0;
    env->fpcr_dnod = (val & FPCR_DNOD) != 0;
    env->fpcr_undz = (val & FPCR_UNDZ) != 0;
}
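/* In user-mode emulation there is no page table to walk: any MMU fault
   is simply reported back as EXCP_MMFAULT with the faulting address in
   trap_arg0.  */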
#if defined(CONFIG_USER_ONLY)
int cpu_alpha_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                                int mmu_idx, int is_softmmu)
{
    env->exception_index = EXCP_MMFAULT;
    env->trap_arg0 = address;
    return 1;
}
#else
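/* Exchange the integer registers that have PALmode shadow copies
   (r8-r14 and r25) with env->shadow[0..7].  Called whenever the CPU
   switches into or out of PALmode.  */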
void swap_shadow_regs(CPUState *env)
{
    uint64_t i0, i1, i2, i3, i4, i5, i6, i7;

    i0 = env->ir[8];
    i1 = env->ir[9];
    i2 = env->ir[10];
    i3 = env->ir[11];
    i4 = env->ir[12];
    i5 = env->ir[13];
    i6 = env->ir[14];
    i7 = env->ir[25];

    env->ir[8] = env->shadow[0];
    env->ir[9] = env->shadow[1];
    env->ir[10] = env->shadow[2];
    env->ir[11] = env->shadow[3];
    env->ir[12] = env->shadow[4];
    env->ir[13] = env->shadow[5];
    env->ir[14] = env->shadow[6];
    env->ir[25] = env->shadow[7];

    env->shadow[0] = i0;
    env->shadow[1] = i1;
    env->shadow[2] = i2;
    env->shadow[3] = i3;
    env->shadow[4] = i4;
    env->shadow[5] = i5;
    env->shadow[6] = i6;
    env->shadow[7] = i7;
}

/* Returns the OSF/1 entMM failure indication, or -1 on success. */
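/* The walk below follows the three-level layout expected by the Unix
   PALcode: each level indexes 10 bits of the virtual address, and the
   PFN of the next level (or of the final page) comes from bits <63:32>
   of the PTE.  */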
static int get_physical_address(CPUState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit. */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage. */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active. */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses. */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled. */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does. */

    pt = env->ptbr;

    /* L1 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read. */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations. */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }
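    /* The #error check above guards the assumption, used just below,
       that the PTE fault-on bits (FOR, FOW and FOE at bits 1..3) line
       up with PAGE_READ, PAGE_WRITE and PAGE_EXEC once the PTE is
       shifted right by one.  */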
    /* Check fault-on-operation violations. */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
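/* Debug translation, used for monitor and gdbstub accesses: walk the
   page tables with no protection requirement (prot_need == 0) and
   without touching the TLB; returns -1 if the address does not map.  */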
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}
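/* Softmmu fault handler.  'rw' is 0 for read, 1 for write and 2 for
   execute, so '1 << rw' yields the matching PAGE_* bit.  On failure the
   OSF/1 entMM arguments are staged in trap_arg0..2, with -1 in
   trap_arg2 denoting an instruction fetch.  */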
int cpu_alpha_handle_mmu_fault(CPUState *env, target_ulong addr, int rw,
                               int mmu_idx, int is_softmmu)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        env->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (rw == 2 ? -1 : rw);
        return 1;
    }

    tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */

void do_interrupt (CPUState *env)
{
    int i = env->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        case EXCP_STL_C:
            name = "stl_c";
            break;
        case EXCP_STQ_C:
            name = "stq_c";
            break;
        }
        qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
    }

    env->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
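    /* Compute the offset of the PALcode entry point for this exception;
       execution continues at env->palbr plus this offset below.  */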
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job. */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(env, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode. */
    env->exc_addr = env->pc | env->pal_mode;

    /* Continue execution at the PALcode entry point. */
    env->pc = env->palbr + i;

    /* Switch to PALmode. */
    if (!env->pal_mode) {
        env->pal_mode = 1;
        swap_shadow_regs(env);
    }
#endif /* !USER_ONLY */
}

void cpu_dump_state (CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    int i;

    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                env->pc, env->ps);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
                    linux_reg_names[i], env->ir[i]);
        if ((i % 3) == 2)
            cpu_fprintf(f, "\n");
    }

    cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                env->lock_addr, env->lock_value);

    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
                    *((uint64_t *)(&env->fir[i])));
        if ((i % 3) == 2)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n");
}