1 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 */
9 #include "qemu/osdep.h"
11 #include "internals.h"
14 static int loongarch_map_tlb_entry(CPULoongArchState
*env
, hwaddr
*physical
,
15 int *prot
, target_ulong address
,
16 int access_type
, int index
, int mmu_idx
)
18 LoongArchTLB
*tlb
= &env
->tlb
[index
];
19 uint64_t plv
= mmu_idx
;
20 uint64_t tlb_entry
, tlb_ppn
;
21 uint8_t tlb_ps
, n
, tlb_v
, tlb_d
, tlb_plv
, tlb_nx
, tlb_nr
, tlb_rplv
;
23 if (index
>= LOONGARCH_STLB
) {
24 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
26 tlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
28 n
= (address
>> tlb_ps
) & 0x1;/* Odd or even */
30 tlb_entry
= n
? tlb
->tlb_entry1
: tlb
->tlb_entry0
;
31 tlb_v
= FIELD_EX64(tlb_entry
, TLBENTRY
, V
);
32 tlb_d
= FIELD_EX64(tlb_entry
, TLBENTRY
, D
);
33 tlb_plv
= FIELD_EX64(tlb_entry
, TLBENTRY
, PLV
);
35 tlb_ppn
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, PPN
);
36 tlb_nx
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, NX
);
37 tlb_nr
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, NR
);
38 tlb_rplv
= FIELD_EX64(tlb_entry
, TLBENTRY_64
, RPLV
);
40 tlb_ppn
= FIELD_EX64(tlb_entry
, TLBENTRY_32
, PPN
);
46 /* Remove sw bit between bit12 -- bit PS*/
47 tlb_ppn
= tlb_ppn
& ~(((0x1UL
<< (tlb_ps
- 12)) -1));
49 /* Check access rights */
51 return TLBRET_INVALID
;
54 if (access_type
== MMU_INST_FETCH
&& tlb_nx
) {
58 if (access_type
== MMU_DATA_LOAD
&& tlb_nr
) {
62 if (((tlb_rplv
== 0) && (plv
> tlb_plv
)) ||
63 ((tlb_rplv
== 1) && (plv
!= tlb_plv
))) {
67 if ((access_type
== MMU_DATA_STORE
) && !tlb_d
) {
71 *physical
= (tlb_ppn
<< R_TLBENTRY_64_PPN_SHIFT
) |
72 (address
& MAKE_64BIT_MASK(0, tlb_ps
));
84 * One tlb entry holds an adjacent odd/even pair, the vpn is the
85 * content of the virtual page number divided by 2. So the
86 * compare vpn is bit[47:15] for 16KiB page. while the vppn
87 * field in tlb entry contains bit[47:13], so need adjust.
88 * virt_vpn = vaddr[47:13]
90 bool loongarch_tlb_search(CPULoongArchState
*env
, target_ulong vaddr
,
94 uint16_t csr_asid
, tlb_asid
, stlb_idx
;
95 uint8_t tlb_e
, tlb_ps
, tlb_g
, stlb_ps
;
97 uint64_t vpn
, tlb_vppn
;
99 csr_asid
= FIELD_EX64(env
->CSR_ASID
, CSR_ASID
, ASID
);
100 stlb_ps
= FIELD_EX64(env
->CSR_STLBPS
, CSR_STLBPS
, PS
);
101 vpn
= (vaddr
& TARGET_VIRT_MASK
) >> (stlb_ps
+ 1);
102 stlb_idx
= vpn
& 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
103 compare_shift
= stlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
106 for (i
= 0; i
< 8; ++i
) {
107 tlb
= &env
->tlb
[i
* 256 + stlb_idx
];
108 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
110 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
111 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
112 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
114 if ((tlb_g
== 1 || tlb_asid
== csr_asid
) &&
115 (vpn
== (tlb_vppn
>> compare_shift
))) {
116 *index
= i
* 256 + stlb_idx
;
123 for (i
= LOONGARCH_STLB
; i
< LOONGARCH_TLB_MAX
; ++i
) {
125 tlb_e
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, E
);
127 tlb_vppn
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, VPPN
);
128 tlb_ps
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, PS
);
129 tlb_asid
= FIELD_EX64(tlb
->tlb_misc
, TLB_MISC
, ASID
);
130 tlb_g
= FIELD_EX64(tlb
->tlb_entry0
, TLBENTRY
, G
);
131 compare_shift
= tlb_ps
+ 1 - R_TLB_MISC_VPPN_SHIFT
;
132 vpn
= (vaddr
& TARGET_VIRT_MASK
) >> (tlb_ps
+ 1);
133 if ((tlb_g
== 1 || tlb_asid
== csr_asid
) &&
134 (vpn
== (tlb_vppn
>> compare_shift
))) {
143 static int loongarch_map_address(CPULoongArchState
*env
, hwaddr
*physical
,
144 int *prot
, target_ulong address
,
145 MMUAccessType access_type
, int mmu_idx
)
149 match
= loongarch_tlb_search(env
, address
, &index
);
151 return loongarch_map_tlb_entry(env
, physical
, prot
,
152 address
, access_type
, index
, mmu_idx
);
155 return TLBRET_NOMATCH
;
158 static hwaddr
dmw_va2pa(CPULoongArchState
*env
, target_ulong va
,
162 return va
& TARGET_VIRT_MASK
;
164 uint32_t pseg
= FIELD_EX32(dmw
, CSR_DMW_32
, PSEG
);
165 return (va
& MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT
)) | \
166 (pseg
<< R_CSR_DMW_32_VSEG_SHIFT
);
170 int get_physical_address(CPULoongArchState
*env
, hwaddr
*physical
,
171 int *prot
, target_ulong address
,
172 MMUAccessType access_type
, int mmu_idx
)
174 int user_mode
= mmu_idx
== MMU_IDX_USER
;
175 int kernel_mode
= mmu_idx
== MMU_IDX_KERNEL
;
176 uint32_t plv
, base_c
, base_v
;
178 uint8_t da
= FIELD_EX64(env
->CSR_CRMD
, CSR_CRMD
, DA
);
179 uint8_t pg
= FIELD_EX64(env
->CSR_CRMD
, CSR_CRMD
, PG
);
181 /* Check PG and DA */
183 *physical
= address
& TARGET_PHYS_MASK
;
184 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
188 plv
= kernel_mode
| (user_mode
<< R_CSR_DMW_PLV3_SHIFT
);
190 base_v
= address
>> R_CSR_DMW_64_VSEG_SHIFT
;
192 base_v
= address
>> R_CSR_DMW_32_VSEG_SHIFT
;
194 /* Check direct map window */
195 for (int i
= 0; i
< 4; i
++) {
197 base_c
= FIELD_EX64(env
->CSR_DMW
[i
], CSR_DMW_64
, VSEG
);
199 base_c
= FIELD_EX64(env
->CSR_DMW
[i
], CSR_DMW_32
, VSEG
);
201 if ((plv
& env
->CSR_DMW
[i
]) && (base_c
== base_v
)) {
202 *physical
= dmw_va2pa(env
, address
, env
->CSR_DMW
[i
]);
203 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
208 /* Check valid extension */
209 addr_high
= sextract64(address
, TARGET_VIRT_ADDR_SPACE_BITS
, 16);
210 if (!(addr_high
== 0 || addr_high
== -1)) {
211 return TLBRET_BADADDR
;
215 return loongarch_map_address(env
, physical
, prot
, address
,
216 access_type
, mmu_idx
);
219 hwaddr
loongarch_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
221 LoongArchCPU
*cpu
= LOONGARCH_CPU(cs
);
222 CPULoongArchState
*env
= &cpu
->env
;
226 if (get_physical_address(env
, &phys_addr
, &prot
, addr
, MMU_DATA_LOAD
,
227 cpu_mmu_index(env
, false)) != 0) {