/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#include "sysemu/reset.h"
#include "tcg/tcg.h"
#include "vec.h"

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

static const char * const excp_names[] = {
    [EXCCODE_INT] = "Interrupt",
    [EXCCODE_PIL] = "Page invalid exception for load",
    [EXCCODE_PIS] = "Page invalid exception for store",
    [EXCCODE_PIF] = "Page invalid exception for fetch",
    [EXCCODE_PME] = "Page modified exception",
    [EXCCODE_PNR] = "Page Not Readable exception",
    [EXCCODE_PNX] = "Page Not Executable exception",
    [EXCCODE_PPI] = "Page Privilege error",
    [EXCCODE_ADEF] = "Address error for instruction fetch",
    [EXCCODE_ADEM] = "Address error for Memory access",
    [EXCCODE_SYS] = "Syscall",
    [EXCCODE_BRK] = "Break",
    [EXCCODE_INE] = "Instruction Non-Existent",
    [EXCCODE_IPE] = "Instruction privilege error",
    [EXCCODE_FPD] = "Floating Point Disabled",
    [EXCCODE_FPE] = "Floating Point Exception",
    [EXCCODE_DBP] = "Debug breakpoint",
    [EXCCODE_BCE] = "Bound Check Exception",
    [EXCCODE_SXD] = "128 bit vector instructions Disable exception",
    [EXCCODE_ASXD] = "256 bit vector instructions Disable exception",
};

const char *loongarch_exception_name(int32_t exception)
{
    assert(excp_names[exception]);
    return excp_names[exception];
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    set_pc(env, value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    return env->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

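/*
 * A board IRQ line is mirrored into the corresponding CSR.ESTAT.IS bit;
 * the hard CPU interrupt is asserted while any IS bit is set and
 * deasserted once they are all clear.
 */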
void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);

    if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}

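/*
 * Hardware interrupts are globally enabled when CRMD.IE is set and the
 * core is not stopped in debug mode (DBG.DST).
 */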
static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST));
}

/* Check whether there is a pending interrupt that is not masked out */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}

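/*
 * Exception/interrupt entry: save the previous PLV and IE (into PRMD, or
 * TLBRPRMD for a TLB refill), record the cause in ESTAT for non-refill
 * exceptions, then switch to PLV0 with interrupts disabled and jump to
 * the configured entry point.
 */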
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool update_badinstr = true;
    int cause = -1;
    const char *name;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        if (cs->exception_index < 0 ||
            cs->exception_index >= ARRAY_SIZE(excp_names)) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__,
                      env->pc, env->CSR_ERA, env->CSR_TLBRERA, name);
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = false;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) is not supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

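    /*
     * ECFG.VS selects the spacing between vectored entries: 2^VS
     * instructions, i.e. (1 << VS) * 4 bytes.  VS == 0 leaves vec_size
     * at 0, so all exceptions share the single entry at EENTRY.
     */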
    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY +
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx " ExS "
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s,\n ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      " BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        LoongArchCPU *cpu = LOONGARCH_CPU(cs);
        CPULoongArchState *env = &cpu->env;

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

#ifdef CONFIG_TCG
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
    set_pc(env, tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    set_pc(env, data[0]);
}
#endif /* CONFIG_TCG */

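/*
 * In system emulation the CPU only has work when an unmasked hardware
 * interrupt is pending; user-mode emulation always reports work.
 */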
static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(env)) {
        has_work = true;
    }

    return has_work;
#endif
}

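/*
 * CPUCFG/CSR defaults modelling the LA464 core (as used in the
 * Loongson 3A5000): LA64 with FP, LSX/LASX, and the L1/L2/L3 cache
 * geometry described by CPUCFG16..CPUCFG20.
 */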
static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010; /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);
    loongarch_cpu_post_init(obj);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042; /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));

    qemu_printf("%s\n", typename);
}

void loongarch_cpu_list(void)
{
    GSList *list;
    list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_list_entry, NULL);
    g_slist_free(list);
}

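/*
 * Architectural reset state: PLV0 with interrupts disabled, direct
 * address translation (DA) mode, and the FP, LSX/LASX and binary
 * translation extensions switched off in EUEN.
 */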
static void loongarch_cpu_reset_hold(Object *obj)
{
    CPUState *cs = CPU(obj);
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu);
    CPULoongArchState *env = &cpu->env;

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj);
    }

    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
    env->fcsr0 = 0x0;

    int n;
    /* Set the CSR registers to their reset values */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
    memset(env->tlb, 0, sizeof(env->tlb));
#endif

    restore_fp_status(env);
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static void loongarch_qemu_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    qemu_log_mask(LOG_UNIMP, "[%s]: Unimplemented reg 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
}

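/*
 * Misc IOCSR registers emulated directly by QEMU: version and feature
 * bits plus the "Loongson"/"3A5000" identification strings; unknown
 * offsets read as zero.
 */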
static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr) {
    case VERSION_REG:
        return 0x11ULL;
    case FEATURE_REG:
        return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI |
               1ULL << IOCSRF_CSRIPI;
    case VENDOR_REG:
        return 0x6e6f73676e6f6f4cULL; /* "Loongson" */
    case CPUNAME_REG:
        return 0x303030354133ULL;     /* "3A5000" */
    case MISC_FUNC_REG:
        return 1ULL << IOCSRM_EXTIOI_EN;
    }
    return 0ULL;
}

static const MemoryRegionOps loongarch_qemu_ops = {
    .read = loongarch_qemu_read,
    .write = loongarch_qemu_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
#endif

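/*
 * The "lsx"/"lasx" CPU properties toggle the corresponding CPUCFG2 bits.
 * LASX depends on LSX: enabling "lasx" also sets LSX, and disabling
 * "lsx" clears LASX as well.
 */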
static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0);
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
            cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
        }
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

void loongarch_cpu_post_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                                 loongarch_set_lsx);
    }
    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                                 loongarch_set_lasx);
    }
}

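/*
 * Per-instance init: wire up the inbound IRQ lines, the constant timer
 * and the per-CPU IOCSR address space, with the QEMU-emulated misc
 * registers mapped at offset 0.
 */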
static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
    memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL,
                          env, "iocsr", UINT64_MAX);
    address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR");
    memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops,
                          NULL, "iocsr_misc", 0x428);
    memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem);
#endif
}

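/*
 * Resolve a "-cpu" name: accept either a full QOM type name or a bare
 * model name such as "la464", and reject classes that are not
 * LoongArch CPUs.
 */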
static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
        if (!oc) {
            return NULL;
        }
    }

    if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)) {
        return oc;
    }
    return NULL;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0,
                 get_float_exception_flags(&env->fp_status));

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

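/* Hooks the TCG accelerator uses for this target. */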
static struct TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};

static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    return cpu->phy_id;
}
#endif

static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    cc->get_arch_id = loongarch_cpu_get_arch_id;
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
}

static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
{
    return "loongarch32";
}

static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_num_core_regs = 35;
    cc->gdb_core_xml_file = "loongarch-base32.xml";
    cc->gdb_arch_name = loongarch32_gdb_arch_name;
}

static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
{
    return "loongarch64";
}

static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_num_core_regs = 35;
    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

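/* Register a concrete CPU model under the 32-bit or 64-bit base class. */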
#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_align = __alignof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)