/*
 * MIPS internal definitions and helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MIPS_INTERNAL_H
#define MIPS_INTERNAL_H

#include "exec/memattrs.h"
#ifdef CONFIG_TCG
#include "tcg/tcg-internal.h"
#endif

/*
 * MMU types: the first four entries have the same layout as the
 * CP0C0_MT field.
 */
enum mips_mmu_types {
    MMU_TYPE_NONE  = 0,
    MMU_TYPE_R4000 = 1,    /* Standard TLB */
    MMU_TYPE_BAT   = 2,    /* Block Address Translation */
    MMU_TYPE_FMT   = 3,    /* Fixed Mapping */
    MMU_TYPE_DVF   = 4,    /* Dual VTLB and FTLB */
    MMU_TYPE_R3000,
    MMU_TYPE_R6000,
    MMU_TYPE_R8000
};

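/*
 * CPU model definition: per-model reset values and read/write masks for
 * the CP0 configuration and FPU control registers.
 */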
struct mips_def_t {
    const char *name;
    int32_t CP0_PRid;
    int32_t CP0_Config0;
    int32_t CP0_Config1;
    int32_t CP0_Config2;
    int32_t CP0_Config3;
    int32_t CP0_Config4;
    int32_t CP0_Config4_rw_bitmask;
    int32_t CP0_Config5;
    int32_t CP0_Config5_rw_bitmask;
    int32_t CP0_Config6;
    int32_t CP0_Config6_rw_bitmask;
    int32_t CP0_Config7;
    int32_t CP0_Config7_rw_bitmask;
    target_ulong CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    int32_t SYNCI_Step;
    int32_t CCRes;
    int32_t CP0_Status_rw_bitmask;
    int32_t CP0_TCStatus_rw_bitmask;
    int32_t CP0_SRSCtl;
    int32_t CP1_fcr0;
    int32_t CP1_fcr31_rw_bitmask;
    int32_t CP1_fcr31;
    int32_t MSAIR;
    int32_t SEGBITS;
    int32_t PABITS;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
    target_ulong CP0_EBaseWG_rw_bitmask;
    uint64_t insn_flags;
    enum mips_mmu_types mmu_type;
    int32_t SAARP;
};

extern const char regnames[32][4];
extern const char fregnames[32][4];

extern const struct mips_def_t mips_defs[];
extern const int mips_defs_number;

bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

#if !defined(CONFIG_USER_ONLY)

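/*
 * Result codes returned by the address translation helpers below
 * (get_physical_address() and the map_address hooks).
 */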
enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

int get_physical_address(CPUMIPSState *env, hwaddr *physical,
                         int *prot, target_ulong real_address,
                         MMUAccessType access_type, int mmu_idx);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

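/*
 * One R4000-style TLB entry; each entry maps an even/odd pair of virtual
 * pages, hence the 0/1 flag fields and the two-element PFN array.
 */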
typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint16_t ASID;
    uint32_t MMID;
    unsigned int G:1;
    unsigned int C0:3;
    unsigned int C1:3;
    unsigned int V0:1;
    unsigned int V1:1;
    unsigned int D0:1;
    unsigned int D1:1;
    unsigned int XI0:1;
    unsigned int XI1:1;
    unsigned int RI0:1;
    unsigned int RI1:1;
    unsigned int EHINV:1;
    uint64_t PFN[2];
};

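/*
 * Per-CPU MMU state: the TLB array plus function pointers selecting the
 * address-translation and TLB-maintenance helpers for the MMU model in use.
 */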
struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, MMUAccessType access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};

int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, MMUAccessType access_type);
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, MMUAccessType access_type);
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, MMUAccessType access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);

void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr);
extern const VMStateDescription vmstate_mips_cpu;

#endif /* !CONFIG_USER_ONLY */

#define cpu_signal_handler cpu_mips_signal_handler

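/*
 * True when hardware interrupts can be taken at all: the global IE bit is
 * set, the CPU is not at exception (EXL) or error (ERL) level, not in debug
 * mode, and the current thread context is not masking interrupts (IXMT).
 */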
static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /*
         * Note that the TCStatus IXMT field is initialized to zero,
         * and only MT capable cores can set it to one. So we don't
         * need to check for MT capabilities here.
         */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}

/* Check whether there is a pending interrupt that is not masked out. */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /*
         * A MIPS configured with a vectorizing external interrupt controller
         * will feed a vector into the Cause pending lines. The core treats
         * the status lines as a vector level, not as individual masks.
         */
        r = pending > status;
    } else {
        /*
         * A MIPS configured with compatibility or VInt (Vectored Interrupts)
         * treats the pending lines as individual interrupt lines, while the
         * status lines are individual masks.
         */
        r = (pending & status) != 0;
    }
    return r;
}

void mips_tcg_init(void);

void msa_reset(CPUMIPSState *env);

/* cp0_timer.c */
uint32_t cpu_mips_get_count(CPUMIPSState *env);
void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

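/*
 * Set the PC; bit 0 of the target address selects the compressed
 * (MIPS16/microMIPS) instruction-set mode via MIPS_HFLAG_M16.
 */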
static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value)
{
    env->active_tc.PC = value & ~(target_ulong)1;
    if (value & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}

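/*
 * Recompute the physical address mask: use the full PABITS range when
 * large physical addresses (ELPA) are enabled, else the default mask.
 */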
static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}

static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /*
     * Now verify that there are active thread contexts in the VPE.
     *
     * This assumes the CPU model will internally reschedule threads
     * if the active one goes to sleep. If there are no threads available
     * the active one will be in a sleeping state, and we can turn off
     * the entire VPE.
     */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled) */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}

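/*
 * Recompute the cached translation flags (hflags) from the current
 * CP0, FPU and ISA state.
 */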
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) &
                       MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS_R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS_R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our cpu supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our cpu supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our cpu supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS_R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS_R1) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /*
         * All supported MIPS IV CPUs use the XX (CU3) to enable
         * and disable the MIPS IV extensions to the MIPS III ISA.
         * Some other MIPS IV CPUs ignore the bit, so the check here
         * would be too restrictive for them.
         */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (ase_msa_available(env)) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}

void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

const char *mips_exception_name(int32_t exception);

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif