/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch TLB helpers
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "cpu-csr.h"
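
/*
 * Translate @address through the TLB entry at @index: on success fill
 * @physical and @prot and return TLBRET_MATCH, otherwise return a
 * TLBRET_* code describing the access fault.
 */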
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY, PPN);
    tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY, NX);
    tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY, NR);
    tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY, RPLV);

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }

    /*
     * tlb_entry contains ppn[47:12] while a 16KiB page ppn is [47:15],
     * so the in-page offset bits are taken from the virtual address.
     */
    *physical = (tlb_ppn << R_TLBENTRY_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}

/*
 * One TLB entry holds an adjacent odd/even page pair, so the VPN used
 * for comparison is the virtual page number divided by 2. For a 16KiB
 * page the compare VPN is bits [47:15] of the address, while the VPPN
 * field in the TLB entry holds bits [47:13], so it must be shifted
 * before the comparison.
 * virt_vpn = vaddr[47:13]
 */
static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                                 int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[22:15] <==> TLBIDX.index for a 16KiB page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }

    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}
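
/*
 * Look up @address in the TLB and, on a hit, translate it through the
 * matching entry.
 */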
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}
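
/*
 * Translation order: direct address translation when paging is disabled,
 * then the direct-mapped address windows, then mapped (TLB) translation.
 */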
static int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                                int *prot, target_ulong address,
                                MMUAccessType access_type, int mmu_idx)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA */
    if (da && !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    base_v = address >> TARGET_VIRT_ADDR_SPACE_BITS;
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        base_c = env->CSR_DMW[i] >> TARGET_VIRT_ADDR_SPACE_BITS;
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(address);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx);
}

hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        env->CSR_TLBRBADV = address;
        env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN,
                                      extract64(address, 13, 35));
    } else {
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}
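
/*
 * QEMU TLB fill hook: translate the access and install the page on
 * success; otherwise raise the guest MMU exception unless this is
 * only a probe.
 */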
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    hwaddr physical;
    int prot;
    int ret = TLBRET_BADADDR;

    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);

    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}