]> git.proxmox.com Git - mirror_qemu.git/blob - target/microblaze/mmu.c
target-microblaze: mmu: Cleanup debug log messages
[mirror_qemu.git] / target / microblaze / mmu.c
1 /*
2 * Microblaze MMU emulation for qemu.
3 *
4 * Copyright (c) 2009 Edgar E. Iglesias
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24
/* Decode the 3-bit TLB_PAGESZ field into a size in bytes.
 * Sizes are powers of four: 1K, 4K, 16K, 64K, 256K, 1M, 4M, 16M.  */
static unsigned int tlb_decode_size(unsigned int f)
{
    assert(f < 8);
    return 1024u << (2 * f);
}
34
35 static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
36 {
37 CPUState *cs = CPU(mb_env_get_cpu(env));
38 struct microblaze_mmu *mmu = &env->mmu;
39 unsigned int tlb_size;
40 uint32_t tlb_tag, end, t;
41
42 t = mmu->rams[RAM_TAG][idx];
43 if (!(t & TLB_VALID))
44 return;
45
46 tlb_tag = t & TLB_EPN_MASK;
47 tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
48 end = tlb_tag + tlb_size;
49
50 while (tlb_tag < end) {
51 tlb_flush_page(cs, tlb_tag);
52 tlb_tag += TARGET_PAGE_SIZE;
53 }
54 }
55
56 static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
57 {
58 struct microblaze_mmu *mmu = &env->mmu;
59 unsigned int i;
60 uint32_t t;
61
62 if (newpid & ~0xff)
63 qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
64
65 for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
66 /* Lookup and decode. */
67 t = mmu->rams[RAM_TAG][i];
68 if (t & TLB_VALID) {
69 if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
70 mmu_flush_idx(env, i);
71 }
72 }
73 }
74
75 /* rw - 0 = read, 1 = write, 2 = fetch. */
76 unsigned int mmu_translate(struct microblaze_mmu *mmu,
77 struct microblaze_mmu_lookup *lu,
78 target_ulong vaddr, int rw, int mmu_idx)
79 {
80 unsigned int i, hit = 0;
81 unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
82 uint64_t tlb_tag, tlb_rpn, mask;
83 uint32_t tlb_size, t0;
84
85 lu->err = ERR_MISS;
86 for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
87 uint64_t t, d;
88
89 /* Lookup and decode. */
90 t = mmu->rams[RAM_TAG][i];
91 if (t & TLB_VALID) {
92 tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
93 if (tlb_size < TARGET_PAGE_SIZE) {
94 qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
95 abort();
96 }
97
98 mask = ~((uint64_t)tlb_size - 1);
99 tlb_tag = t & TLB_EPN_MASK;
100 if ((vaddr & mask) != (tlb_tag & mask)) {
101 continue;
102 }
103 if (mmu->tids[i]
104 && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
105 continue;
106 }
107
108 /* Bring in the data part. */
109 d = mmu->rams[RAM_DATA][i];
110 tlb_ex = d & TLB_EX;
111 tlb_wr = d & TLB_WR;
112
113 /* Now let's see if there is a zone that overrides the protbits. */
114 tlb_zsel = (d >> 4) & 0xf;
115 t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
116 t0 &= 0x3;
117
118 if (tlb_zsel > mmu->c_mmu_zones) {
119 qemu_log_mask(LOG_GUEST_ERROR,
120 "tlb zone select out of range! %d\n", tlb_zsel);
121 t0 = 1; /* Ignore. */
122 }
123
124 if (mmu->c_mmu == 1) {
125 t0 = 1; /* Zones are disabled. */
126 }
127
128 switch (t0) {
129 case 0:
130 if (mmu_idx == MMU_USER_IDX)
131 continue;
132 break;
133 case 2:
134 if (mmu_idx != MMU_USER_IDX) {
135 tlb_ex = 1;
136 tlb_wr = 1;
137 }
138 break;
139 case 3:
140 tlb_ex = 1;
141 tlb_wr = 1;
142 break;
143 default: break;
144 }
145
146 lu->err = ERR_PROT;
147 lu->prot = PAGE_READ;
148 if (tlb_wr)
149 lu->prot |= PAGE_WRITE;
150 else if (rw == 1)
151 goto done;
152 if (tlb_ex)
153 lu->prot |=PAGE_EXEC;
154 else if (rw == 2) {
155 goto done;
156 }
157
158 tlb_rpn = d & TLB_RPN_MASK;
159
160 lu->vaddr = tlb_tag;
161 lu->paddr = tlb_rpn & mmu->c_addr_mask;
162 lu->paddr = tlb_rpn;
163 lu->size = tlb_size;
164 lu->err = ERR_HIT;
165 lu->idx = i;
166 hit = 1;
167 goto done;
168 }
169 }
170 done:
171 qemu_log_mask(CPU_LOG_MMU,
172 "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
173 vaddr, rw, tlb_wr, tlb_ex, hit);
174 return hit;
175 }
176
177 /* Writes/reads to the MMU's special regs end up here. */
178 uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
179 {
180 unsigned int i;
181 uint32_t r = 0;
182
183 if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
184 qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
185 return 0;
186 }
187 if (ext && rn != MMU_R_TLBLO) {
188 qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
189 return 0;
190 }
191
192 switch (rn) {
193 /* Reads to HI/LO trig reads from the mmu rams. */
194 case MMU_R_TLBLO:
195 case MMU_R_TLBHI:
196 if (!(env->mmu.c_mmu_tlb_access & 1)) {
197 qemu_log_mask(LOG_GUEST_ERROR,
198 "Invalid access to MMU reg %d\n", rn);
199 return 0;
200 }
201
202 i = env->mmu.regs[MMU_R_TLBX] & 0xff;
203 r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
204 if (rn == MMU_R_TLBHI)
205 env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
206 break;
207 case MMU_R_PID:
208 case MMU_R_ZPR:
209 if (!(env->mmu.c_mmu_tlb_access & 1)) {
210 qemu_log_mask(LOG_GUEST_ERROR,
211 "Invalid access to MMU reg %d\n", rn);
212 return 0;
213 }
214 r = env->mmu.regs[rn];
215 break;
216 case MMU_R_TLBX:
217 r = env->mmu.regs[rn];
218 break;
219 case MMU_R_TLBSX:
220 qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
221 break;
222 default:
223 qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
224 break;
225 }
226 qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
227 return r;
228 }
229
/*
 * Writes to the MMU's special regs end up here.
 *
 * env - CPU state owning the MMU model.
 * ext - extended access: deposits v into the upper 32-bit half of the
 *       64-bit TLB ram word (only legal for TLBLO, checked below).
 * rn  - MMU register number (MMU_R_*).
 * v   - 32-bit value being written.
 */
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint64_t tmp64;
    unsigned int i;
    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]);

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trig writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        /* TLBX selects which TLB entry is accessed. */
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            /* NOTE(review): qemu_loglevel_mask(~0) is true whenever any
               log category is enabled; it looks redundant with the
               LOG_GUEST_ERROR check inside qemu_log_mask — confirm intent. */
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalidating index %x at pc=%" PRIx64 "\n",
                              i, env->sregs[SR_PC]);
            /* Latch the current PID as this entry's TID and flush the
               entry's old mapping from QEMU's TLB before the tag word
               below is rewritten. */
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            mmu_flush_idx(env, i);
        }
        /* rn & 1 picks RAM_TAG vs RAM_DATA; ext picks the 32-bit half. */
        tmp64 = env->mmu.rams[rn & 1][i];
        env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
        break;
    case MMU_R_ZPR:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Changes to the zone protection reg flush the QEMU TLB.
           Fortunately, these are very uncommon. */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(CPU(cpu));
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* A PID change flushes mappings tagged with the old PID. */
        if (v != env->mmu.regs[rn]) {
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBX:
        /* Bit 31 is read-only. */
        env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
        break;
    case MMU_R_TLBSX:
    {
        struct microblaze_mmu_lookup lu;
        int hit;

        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Search for v's EPN: on a hit TLBX gets the matching entry's
           index, on a miss the TLBX miss bit is set instead. */
        hit = mmu_translate(&env->mmu, &lu,
                            v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else {
            env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
}
318
319 void mmu_init(struct microblaze_mmu *mmu)
320 {
321 int i;
322 for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
323 mmu->regs[i] = 0;
324 }
325 }