/*
 * Provenance: git.proxmox.com mirror_qemu.git — target/loongarch/tlb_helper.c
 * Commit subject: "target/loongarch: Add MMU support for LoongArch CPU."
 */
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * QEMU LoongArch TLB helpers
4 *
5 * Copyright (c) 2021 Loongson Technology Corporation Limited
6 *
7 */
8
9 #include "qemu/osdep.h"
10
11 #include "cpu.h"
12 #include "internals.h"
13 #include "exec/exec-all.h"
14 #include "exec/cpu_ldst.h"
15 #include "exec/log.h"
16 #include "cpu-csr.h"
17
/* Result codes for a TLB lookup; consumed by raise_mmu_exception(). */
enum {
    TLBRET_MATCH = 0,   /* translation succeeded */
    TLBRET_BADADDR = 1, /* address outside the valid virtual address range */
    TLBRET_NOMATCH = 2, /* no TLB entry matched (refill required) */
    TLBRET_INVALID = 3, /* entry matched but its V (valid) bit is clear */
    TLBRET_DIRTY = 4,   /* store hit an entry whose D (dirty) bit is clear */
    TLBRET_RI = 5,      /* load hit a read-inhibited (NR) page */
    TLBRET_XI = 6,      /* fetch hit an execute-inhibited (NX) page */
    TLBRET_PE = 7,      /* privilege-level check failed */
};
28
29 static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
30 int *prot, target_ulong address,
31 int access_type, int index, int mmu_idx)
32 {
33 LoongArchTLB *tlb = &env->tlb[index];
34 uint64_t plv = mmu_idx;
35 uint64_t tlb_entry, tlb_ppn;
36 uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
37
38 if (index >= LOONGARCH_STLB) {
39 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
40 } else {
41 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
42 }
43 n = (address >> tlb_ps) & 0x1;/* Odd or even */
44
45 tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
46 tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
47 tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
48 tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
49 tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY, PPN);
50 tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY, NX);
51 tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY, NR);
52 tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY, RPLV);
53
54 /* Check access rights */
55 if (!tlb_v) {
56 return TLBRET_INVALID;
57 }
58
59 if (access_type == MMU_INST_FETCH && tlb_nx) {
60 return TLBRET_XI;
61 }
62
63 if (access_type == MMU_DATA_LOAD && tlb_nr) {
64 return TLBRET_RI;
65 }
66
67 if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
68 ((tlb_rplv == 1) && (plv != tlb_plv))) {
69 return TLBRET_PE;
70 }
71
72 if ((access_type == MMU_DATA_STORE) && !tlb_d) {
73 return TLBRET_DIRTY;
74 }
75
76 /*
77 * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15]
78 * need adjust.
79 */
80 *physical = (tlb_ppn << R_TLBENTRY_PPN_SHIFT) |
81 (address & MAKE_64BIT_MASK(0, tlb_ps));
82 *prot = PAGE_READ;
83 if (tlb_d) {
84 *prot |= PAGE_WRITE;
85 }
86 if (!tlb_nx) {
87 *prot |= PAGE_EXEC;
88 }
89 return TLBRET_MATCH;
90 }
91
92 /*
93 * One tlb entry holds an adjacent odd/even pair, the vpn is the
94 * content of the virtual page number divided by 2. So the
95 * compare vpn is bit[47:15] for 16KiB page. while the vppn
96 * field in tlb entry contains bit[47:13], so need adjust.
97 * virt_vpn = vaddr[47:13]
98 */
99 static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
100 int *index)
101 {
102 LoongArchTLB *tlb;
103 uint16_t csr_asid, tlb_asid, stlb_idx;
104 uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
105 int i, compare_shift;
106 uint64_t vpn, tlb_vppn;
107
108 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
109 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
110 vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
111 stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
112 compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
113
114 /* Search STLB */
115 for (i = 0; i < 8; ++i) {
116 tlb = &env->tlb[i * 256 + stlb_idx];
117 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
118 if (tlb_e) {
119 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
120 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
121 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
122
123 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
124 (vpn == (tlb_vppn >> compare_shift))) {
125 *index = i * 256 + stlb_idx;
126 return true;
127 }
128 }
129 }
130
131 /* Search MTLB */
132 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
133 tlb = &env->tlb[i];
134 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
135 if (tlb_e) {
136 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
137 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
138 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
139 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
140 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
141 vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
142 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
143 (vpn == (tlb_vppn >> compare_shift))) {
144 *index = i;
145 return true;
146 }
147 }
148 }
149 return false;
150 }
151
152 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
153 int *prot, target_ulong address,
154 MMUAccessType access_type, int mmu_idx)
155 {
156 int index, match;
157
158 match = loongarch_tlb_search(env, address, &index);
159 if (match) {
160 return loongarch_map_tlb_entry(env, physical, prot,
161 address, access_type, index, mmu_idx);
162 }
163
164 return TLBRET_NOMATCH;
165 }
166
167 static int get_physical_address(CPULoongArchState *env, hwaddr *physical,
168 int *prot, target_ulong address,
169 MMUAccessType access_type, int mmu_idx)
170 {
171 int user_mode = mmu_idx == MMU_USER_IDX;
172 int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
173 uint32_t plv, base_c, base_v;
174 int64_t addr_high;
175 uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
176 uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);
177
178 /* Check PG and DA */
179 if (da & !pg) {
180 *physical = address & TARGET_PHYS_MASK;
181 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
182 return TLBRET_MATCH;
183 }
184
185 plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
186 base_v = address >> TARGET_VIRT_ADDR_SPACE_BITS;
187 /* Check direct map window */
188 for (int i = 0; i < 4; i++) {
189 base_c = env->CSR_DMW[i] >> TARGET_VIRT_ADDR_SPACE_BITS;
190 if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
191 *physical = dmw_va2pa(address);
192 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
193 return TLBRET_MATCH;
194 }
195 }
196
197 /* Check valid extension */
198 addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
199 if (!(addr_high == 0 || addr_high == -1)) {
200 return TLBRET_BADADDR;
201 }
202
203 /* Mapped address */
204 return loongarch_map_address(env, physical, prot, address,
205 access_type, mmu_idx);
206 }
207
208 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
209 {
210 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
211 CPULoongArchState *env = &cpu->env;
212 hwaddr phys_addr;
213 int prot;
214
215 if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
216 cpu_mmu_index(env, false)) != 0) {
217 return -1;
218 }
219 return phys_addr;
220 }
221
/*
 * Map a TLBRET_* failure code onto the architectural exception and
 * record the faulting address in the appropriate CSRs.  TLB-refill
 * faults (TLBRET_NOMATCH) report through the dedicated TLBR CSRs;
 * all other faults use CSR.BADV/CSR.TLBEHI.
 */
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        cs->exception_index = EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        /* Flag that the refill handler is active. */
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        /* Refill path: record the bad address and VPPN = vaddr[47:13]. */
        env->CSR_TLBRBADV = address;
        env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN,
                                      extract64(address, 13, 35));
    } else {
        /* BADV is frozen while the debug exception (CSR.DBG.DST) is live. */
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        /* TLBEHI holds the even/odd page-pair base of the fault. */
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}
282
283 bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
284 MMUAccessType access_type, int mmu_idx,
285 bool probe, uintptr_t retaddr)
286 {
287 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
288 CPULoongArchState *env = &cpu->env;
289 hwaddr physical;
290 int prot;
291 int ret = TLBRET_BADADDR;
292
293 /* Data access */
294 ret = get_physical_address(env, &physical, &prot, address,
295 access_type, mmu_idx);
296
297 if (ret == TLBRET_MATCH) {
298 tlb_set_page(cs, address & TARGET_PAGE_MASK,
299 physical & TARGET_PAGE_MASK, prot,
300 mmu_idx, TARGET_PAGE_SIZE);
301 qemu_log_mask(CPU_LOG_MMU,
302 "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
303 " prot %d\n", __func__, address, physical, prot);
304 return true;
305 } else {
306 qemu_log_mask(CPU_LOG_MMU,
307 "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
308 ret);
309 }
310 if (probe) {
311 return false;
312 }
313 raise_mmu_exception(env, address, access_type, ret);
314 cpu_loop_exit_restore(cs, retaddr);
315 }