]> git.proxmox.com Git - mirror_qemu.git/blob - target/loongarch/tlb_helper.c
Merge tag 'misc-fixes-pull-request' of https://gitlab.com/berrange/qemu into staging
[mirror_qemu.git] / target / loongarch / tlb_helper.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * QEMU LoongArch TLB helpers
4 *
5 * Copyright (c) 2021 Loongson Technology Corporation Limited
6 *
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/guest-random.h"
11
12 #include "cpu.h"
13 #include "internals.h"
14 #include "exec/helper-proto.h"
15 #include "exec/exec-all.h"
16 #include "exec/cpu_ldst.h"
17 #include "exec/log.h"
18 #include "cpu-csr.h"
19
/*
 * Result codes for the address-translation helpers below.  Anything
 * other than TLBRET_MATCH is converted into a guest exception by
 * raise_mmu_exception().
 */
enum {
    TLBRET_MATCH = 0,   /* translation succeeded */
    TLBRET_BADADDR = 1, /* address outside the legal VA range */
    TLBRET_NOMATCH = 2, /* no TLB entry matched (refill needed) */
    TLBRET_INVALID = 3, /* entry matched but V bit is clear */
    TLBRET_DIRTY = 4,   /* store hit a clean (D=0) page */
    TLBRET_RI = 5,      /* load hit a read-inhibited (NR) page */
    TLBRET_XI = 6,      /* fetch hit an execute-inhibited (NX) page */
    TLBRET_PE = 7,      /* privilege-level check failed */
};
30
31 static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
32 int *prot, target_ulong address,
33 int access_type, int index, int mmu_idx)
34 {
35 LoongArchTLB *tlb = &env->tlb[index];
36 uint64_t plv = mmu_idx;
37 uint64_t tlb_entry, tlb_ppn;
38 uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
39
40 if (index >= LOONGARCH_STLB) {
41 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
42 } else {
43 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
44 }
45 n = (address >> tlb_ps) & 0x1;/* Odd or even */
46
47 tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
48 tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
49 tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
50 tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
51 if (is_la64(env)) {
52 tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
53 tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
54 tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
55 tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
56 } else {
57 tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
58 tlb_nx = 0;
59 tlb_nr = 0;
60 tlb_rplv = 0;
61 }
62
63 /* Remove sw bit between bit12 -- bit PS*/
64 tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1));
65
66 /* Check access rights */
67 if (!tlb_v) {
68 return TLBRET_INVALID;
69 }
70
71 if (access_type == MMU_INST_FETCH && tlb_nx) {
72 return TLBRET_XI;
73 }
74
75 if (access_type == MMU_DATA_LOAD && tlb_nr) {
76 return TLBRET_RI;
77 }
78
79 if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
80 ((tlb_rplv == 1) && (plv != tlb_plv))) {
81 return TLBRET_PE;
82 }
83
84 if ((access_type == MMU_DATA_STORE) && !tlb_d) {
85 return TLBRET_DIRTY;
86 }
87
88 *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
89 (address & MAKE_64BIT_MASK(0, tlb_ps));
90 *prot = PAGE_READ;
91 if (tlb_d) {
92 *prot |= PAGE_WRITE;
93 }
94 if (!tlb_nx) {
95 *prot |= PAGE_EXEC;
96 }
97 return TLBRET_MATCH;
98 }
99
100 /*
101 * One tlb entry holds an adjacent odd/even pair, the vpn is the
102 * content of the virtual page number divided by 2. So the
103 * compare vpn is bit[47:15] for 16KiB page. while the vppn
104 * field in tlb entry contains bit[47:13], so need adjust.
105 * virt_vpn = vaddr[47:13]
106 */
107 static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
108 int *index)
109 {
110 LoongArchTLB *tlb;
111 uint16_t csr_asid, tlb_asid, stlb_idx;
112 uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
113 int i, compare_shift;
114 uint64_t vpn, tlb_vppn;
115
116 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
117 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
118 vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
119 stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
120 compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
121
122 /* Search STLB */
123 for (i = 0; i < 8; ++i) {
124 tlb = &env->tlb[i * 256 + stlb_idx];
125 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
126 if (tlb_e) {
127 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
128 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
129 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
130
131 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
132 (vpn == (tlb_vppn >> compare_shift))) {
133 *index = i * 256 + stlb_idx;
134 return true;
135 }
136 }
137 }
138
139 /* Search MTLB */
140 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
141 tlb = &env->tlb[i];
142 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
143 if (tlb_e) {
144 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
145 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
146 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
147 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
148 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
149 vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
150 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
151 (vpn == (tlb_vppn >> compare_shift))) {
152 *index = i;
153 return true;
154 }
155 }
156 }
157 return false;
158 }
159
160 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
161 int *prot, target_ulong address,
162 MMUAccessType access_type, int mmu_idx)
163 {
164 int index, match;
165
166 match = loongarch_tlb_search(env, address, &index);
167 if (match) {
168 return loongarch_map_tlb_entry(env, physical, prot,
169 address, access_type, index, mmu_idx);
170 }
171
172 return TLBRET_NOMATCH;
173 }
174
175 static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
176 target_ulong dmw)
177 {
178 if (is_la64(env)) {
179 return va & TARGET_VIRT_MASK;
180 } else {
181 uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
182 return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \
183 (pseg << R_CSR_DMW_32_VSEG_SHIFT);
184 }
185 }
186
187 static int get_physical_address(CPULoongArchState *env, hwaddr *physical,
188 int *prot, target_ulong address,
189 MMUAccessType access_type, int mmu_idx)
190 {
191 int user_mode = mmu_idx == MMU_IDX_USER;
192 int kernel_mode = mmu_idx == MMU_IDX_KERNEL;
193 uint32_t plv, base_c, base_v;
194 int64_t addr_high;
195 uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
196 uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);
197
198 /* Check PG and DA */
199 if (da & !pg) {
200 *physical = address & TARGET_PHYS_MASK;
201 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
202 return TLBRET_MATCH;
203 }
204
205 plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
206 if (is_la64(env)) {
207 base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
208 } else {
209 base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
210 }
211 /* Check direct map window */
212 for (int i = 0; i < 4; i++) {
213 if (is_la64(env)) {
214 base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
215 } else {
216 base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
217 }
218 if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
219 *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
220 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
221 return TLBRET_MATCH;
222 }
223 }
224
225 /* Check valid extension */
226 addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
227 if (!(addr_high == 0 || addr_high == -1)) {
228 return TLBRET_BADADDR;
229 }
230
231 /* Mapped address */
232 return loongarch_map_address(env, physical, prot, address,
233 access_type, mmu_idx);
234 }
235
236 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
237 {
238 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
239 CPULoongArchState *env = &cpu->env;
240 hwaddr phys_addr;
241 int prot;
242
243 if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
244 cpu_mmu_index(env, false)) != 0) {
245 return -1;
246 }
247 return phys_addr;
248 }
249
/*
 * Convert a TLBRET_* failure code into the corresponding guest
 * exception, and record the faulting address in the appropriate CSRs
 * (the TLBR* refill set for TLBRET_NOMATCH, BADV/TLBEHI otherwise).
 */
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);

    switch (tlb_error) {
    default:
        /* Unknown codes fall through and are treated as a bad address */
    case TLBRET_BADADDR:
        cs->exception_index = access_type == MMU_INST_FETCH
                              ? EXCCODE_ADEF : EXCCODE_ADEM;
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        /* Flag TLB-refill context so the handler uses the TLBR* CSRs */
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_LOAD) {
            cs->exception_index = EXCCODE_PIL;
        } else if (access_type == MMU_DATA_STORE) {
            cs->exception_index = EXCCODE_PIS;
        } else if (access_type == MMU_INST_FETCH) {
            cs->exception_index = EXCCODE_PIF;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        cs->exception_index = EXCCODE_PME;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        cs->exception_index = EXCCODE_PNX;
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        cs->exception_index = EXCCODE_PNR;
        break;
    case TLBRET_PE:
        /* Privileged Exception */
        cs->exception_index = EXCCODE_PPI;
        break;
    }

    if (tlb_error == TLBRET_NOMATCH) {
        /* Refill path: faulting VA goes into the TLBR* CSRs */
        env->CSR_TLBRBADV = address;
        if (is_la64(env)) {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
                                          VPPN, extract64(address, 13, 35));
        } else {
            env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
                                          VPPN, extract64(address, 13, 19));
        }
    } else {
        /* Debug mode (CSR.DBG.DST) suppresses the BADV update */
        if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_BADV = address;
        }
        /* TLBEHI holds the base of the even/odd page pair */
        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
    }
}
316
317 static void invalidate_tlb_entry(CPULoongArchState *env, int index)
318 {
319 target_ulong addr, mask, pagesize;
320 uint8_t tlb_ps;
321 LoongArchTLB *tlb = &env->tlb[index];
322
323 int mmu_idx = cpu_mmu_index(env, false);
324 uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
325 uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
326 uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
327
328 if (index >= LOONGARCH_STLB) {
329 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
330 } else {
331 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
332 }
333 pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
334 mask = MAKE_64BIT_MASK(0, tlb_ps + 1);
335
336 if (tlb_v0) {
337 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; /* even */
338 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
339 mmu_idx, TARGET_LONG_BITS);
340 }
341
342 if (tlb_v1) {
343 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize; /* odd */
344 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
345 mmu_idx, TARGET_LONG_BITS);
346 }
347 }
348
349 static void invalidate_tlb(CPULoongArchState *env, int index)
350 {
351 LoongArchTLB *tlb;
352 uint16_t csr_asid, tlb_asid, tlb_g;
353
354 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
355 tlb = &env->tlb[index];
356 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
357 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
358 if (tlb_g == 0 && tlb_asid != csr_asid) {
359 return;
360 }
361 invalidate_tlb_entry(env, index);
362 }
363
364 static void fill_tlb_entry(CPULoongArchState *env, int index)
365 {
366 LoongArchTLB *tlb = &env->tlb[index];
367 uint64_t lo0, lo1, csr_vppn;
368 uint16_t csr_asid;
369 uint8_t csr_ps;
370
371 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
372 csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
373 if (is_la64(env)) {
374 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN);
375 } else {
376 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
377 }
378 lo0 = env->CSR_TLBRELO0;
379 lo1 = env->CSR_TLBRELO1;
380 } else {
381 csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
382 if (is_la64(env)) {
383 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN);
384 } else {
385 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
386 }
387 lo0 = env->CSR_TLBELO0;
388 lo1 = env->CSR_TLBELO1;
389 }
390
391 if (csr_ps == 0) {
392 qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
393 }
394
395 /* Only MTLB has the ps fields */
396 if (index >= LOONGARCH_STLB) {
397 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
398 }
399
400 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
401 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
402 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
403 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);
404
405 tlb->tlb_entry0 = lo0;
406 tlb->tlb_entry1 = lo1;
407 }
408
409 /* Return an random value between low and high */
410 static uint32_t get_random_tlb(uint32_t low, uint32_t high)
411 {
412 uint32_t val;
413
414 qemu_guest_getrandom_nofail(&val, sizeof(val));
415 return val % (high - low + 1) + low;
416 }
417
418 void helper_tlbsrch(CPULoongArchState *env)
419 {
420 int index, match;
421
422 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
423 match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
424 } else {
425 match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
426 }
427
428 if (match) {
429 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
430 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
431 return;
432 }
433
434 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
435 }
436
437 void helper_tlbrd(CPULoongArchState *env)
438 {
439 LoongArchTLB *tlb;
440 int index;
441 uint8_t tlb_ps, tlb_e;
442
443 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
444 tlb = &env->tlb[index];
445
446 if (index >= LOONGARCH_STLB) {
447 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
448 } else {
449 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
450 }
451 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
452
453 if (!tlb_e) {
454 /* Invalid TLB entry */
455 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
456 env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
457 env->CSR_TLBEHI = 0;
458 env->CSR_TLBELO0 = 0;
459 env->CSR_TLBELO1 = 0;
460 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
461 } else {
462 /* Valid TLB entry */
463 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
464 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
465 PS, (tlb_ps & 0x3f));
466 env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
467 R_TLB_MISC_VPPN_SHIFT;
468 env->CSR_TLBELO0 = tlb->tlb_entry0;
469 env->CSR_TLBELO1 = tlb->tlb_entry1;
470 }
471 }
472
473 void helper_tlbwr(CPULoongArchState *env)
474 {
475 int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
476
477 invalidate_tlb(env, index);
478
479 if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
480 env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
481 TLB_MISC, E, 0);
482 return;
483 }
484
485 fill_tlb_entry(env, index);
486 }
487
488 void helper_tlbfill(CPULoongArchState *env)
489 {
490 uint64_t address, entryhi;
491 int index, set, stlb_idx;
492 uint16_t pagesize, stlb_ps;
493
494 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
495 entryhi = env->CSR_TLBREHI;
496 pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
497 } else {
498 entryhi = env->CSR_TLBEHI;
499 pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
500 }
501
502 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
503
504 if (pagesize == stlb_ps) {
505 /* Only write into STLB bits [47:13] */
506 address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);
507
508 /* Choose one set ramdomly */
509 set = get_random_tlb(0, 7);
510
511 /* Index in one set */
512 stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */
513
514 index = set * 256 + stlb_idx;
515 } else {
516 /* Only write into MTLB */
517 index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
518 }
519
520 invalidate_tlb(env, index);
521 fill_tlb_entry(env, index);
522 }
523
524 void helper_tlbclr(CPULoongArchState *env)
525 {
526 LoongArchTLB *tlb;
527 int i, index;
528 uint16_t csr_asid, tlb_asid, tlb_g;
529
530 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
531 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
532
533 if (index < LOONGARCH_STLB) {
534 /* STLB. One line per operation */
535 for (i = 0; i < 8; i++) {
536 tlb = &env->tlb[i * 256 + (index % 256)];
537 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
538 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
539 if (!tlb_g && tlb_asid == csr_asid) {
540 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
541 }
542 }
543 } else if (index < LOONGARCH_TLB_MAX) {
544 /* All MTLB entries */
545 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
546 tlb = &env->tlb[i];
547 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
548 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
549 if (!tlb_g && tlb_asid == csr_asid) {
550 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
551 }
552 }
553 }
554
555 tlb_flush(env_cpu(env));
556 }
557
558 void helper_tlbflush(CPULoongArchState *env)
559 {
560 int i, index;
561
562 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
563
564 if (index < LOONGARCH_STLB) {
565 /* STLB. One line per operation */
566 for (i = 0; i < 8; i++) {
567 int s_idx = i * 256 + (index % 256);
568 env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
569 TLB_MISC, E, 0);
570 }
571 } else if (index < LOONGARCH_TLB_MAX) {
572 /* All MTLB entries */
573 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
574 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
575 TLB_MISC, E, 0);
576 }
577 }
578
579 tlb_flush(env_cpu(env));
580 }
581
582 void helper_invtlb_all(CPULoongArchState *env)
583 {
584 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
585 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
586 TLB_MISC, E, 0);
587 }
588 tlb_flush(env_cpu(env));
589 }
590
591 void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
592 {
593 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
594 LoongArchTLB *tlb = &env->tlb[i];
595 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
596
597 if (tlb_g == g) {
598 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
599 }
600 }
601 tlb_flush(env_cpu(env));
602 }
603
604 void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
605 {
606 uint16_t asid = info & R_CSR_ASID_ASID_MASK;
607
608 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
609 LoongArchTLB *tlb = &env->tlb[i];
610 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
611 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
612
613 if (!tlb_g && (tlb_asid == asid)) {
614 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
615 }
616 }
617 tlb_flush(env_cpu(env));
618 }
619
620 void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
621 target_ulong addr)
622 {
623 uint16_t asid = info & 0x3ff;
624
625 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
626 LoongArchTLB *tlb = &env->tlb[i];
627 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
628 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
629 uint64_t vpn, tlb_vppn;
630 uint8_t tlb_ps, compare_shift;
631
632 if (i >= LOONGARCH_STLB) {
633 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
634 } else {
635 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
636 }
637 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
638 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
639 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
640
641 if (!tlb_g && (tlb_asid == asid) &&
642 (vpn == (tlb_vppn >> compare_shift))) {
643 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
644 }
645 }
646 tlb_flush(env_cpu(env));
647 }
648
649 void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
650 target_ulong info, target_ulong addr)
651 {
652 uint16_t asid = info & 0x3ff;
653
654 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
655 LoongArchTLB *tlb = &env->tlb[i];
656 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
657 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
658 uint64_t vpn, tlb_vppn;
659 uint8_t tlb_ps, compare_shift;
660
661 if (i >= LOONGARCH_STLB) {
662 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
663 } else {
664 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
665 }
666 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
667 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
668 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
669
670 if ((tlb_g || (tlb_asid == asid)) &&
671 (vpn == (tlb_vppn >> compare_shift))) {
672 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
673 }
674 }
675 tlb_flush(env_cpu(env));
676 }
677
/*
 * TCG softmmu fault hook: translate @address and install the mapping.
 *
 * Returns true on success; returns false only when @probe is set and
 * translation failed.  On a non-probe failure this does not return:
 * cpu_loop_exit_restore() longjmps back to the execution loop after
 * raising the guest exception.
 */
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    hwaddr physical;
    int prot;
    int ret;

    /* Data access */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);

    if (ret == TLBRET_MATCH) {
        /* Install the translation into QEMU's softmmu TLB */
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        return true;
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
    }
    if (probe) {
        return false;
    }
    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}
711
712 target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
713 target_ulong level, uint32_t mem_idx)
714 {
715 CPUState *cs = env_cpu(env);
716 target_ulong badvaddr, index, phys, ret;
717 int shift;
718 uint64_t dir_base, dir_width;
719 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
720
721 badvaddr = env->CSR_TLBRBADV;
722 base = base & TARGET_PHYS_MASK;
723
724 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
725 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
726 shift = (shift + 1) * 3;
727
728 if (huge) {
729 return base;
730 }
731 switch (level) {
732 case 1:
733 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
734 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
735 break;
736 case 2:
737 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
738 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
739 break;
740 case 3:
741 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
742 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
743 break;
744 case 4:
745 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
746 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
747 break;
748 default:
749 do_raise_exception(env, EXCCODE_INE, GETPC());
750 return 0;
751 }
752 index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
753 phys = base | index << shift;
754 ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
755 return ret;
756 }
757
758 void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
759 uint32_t mem_idx)
760 {
761 CPUState *cs = env_cpu(env);
762 target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
763 int shift;
764 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
765 uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
766 uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
767
768 base = base & TARGET_PHYS_MASK;
769
770 if (huge) {
771 /* Huge Page. base is paddr */
772 tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT);
773 /* Move Global bit */
774 tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >>
775 LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT |
776 (tmp0 & (~(1 << LOONGARCH_HGLOBAL_SHIFT)));
777 ps = ptbase + ptwidth - 1;
778 if (odd) {
779 tmp0 += MAKE_64BIT_MASK(ps, 1);
780 }
781 } else {
782 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
783 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
784 shift = (shift + 1) * 3;
785 badv = env->CSR_TLBRBADV;
786
787 ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
788 ptindex = ptindex & ~0x1; /* clear bit 0 */
789 ptoffset0 = ptindex << shift;
790 ptoffset1 = (ptindex + 1) << shift;
791
792 phys = base | (odd ? ptoffset1 : ptoffset0);
793 tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
794 ps = ptbase;
795 }
796
797 if (odd) {
798 env->CSR_TLBRELO1 = tmp0;
799 } else {
800 env->CSR_TLBRELO0 = tmp0;
801 }
802 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
803 }